From 02e95a4a65c2df453dd6b8e70c3fd3c21c9257e5 Mon Sep 17 00:00:00 2001 From: arithmetic1728 Date: Fri, 7 Aug 2020 18:30:48 -0700 Subject: [PATCH 1/5] feat!: migrate to use microgen --- .coveragerc | 13 +- README.rst | 10 +- UPGRADING.md | 152 + docs/UPGRADING.md | 1 + docs/dataproc_v1/services.rst | 15 + docs/dataproc_v1/types.rst | 5 + docs/dataproc_v1beta2/services.rst | 15 + docs/dataproc_v1beta2/types.rst | 5 + docs/gapic/v1/api.rst | 6 - docs/gapic/v1/types.rst | 5 - docs/gapic/v1beta2/api.rst | 6 - docs/gapic/v1beta2/types.rst | 5 - docs/index.rst | 19 +- google/cloud/dataproc/__init__.py | 250 + google/cloud/dataproc/py.typed | 2 + google/cloud/dataproc_v1/__init__.py | 223 +- google/cloud/dataproc_v1/gapic/__init__.py | 0 .../autoscaling_policy_service_client.py | 653 -- ...utoscaling_policy_service_client_config.py | 139 - .../gapic/cluster_controller_client.py | 878 --- .../gapic/cluster_controller_client_config.py | 76 - google/cloud/dataproc_v1/gapic/enums.py | 324 - .../gapic/job_controller_client.py | 810 --- .../gapic/job_controller_client_config.py | 149 - .../dataproc_v1/gapic/transports/__init__.py | 0 ...toscaling_policy_service_grpc_transport.py | 179 - .../cluster_controller_grpc_transport.py | 204 - .../job_controller_grpc_transport.py | 212 - ...orkflow_template_service_grpc_transport.py | 249 - .../gapic/workflow_template_service_client.py | 949 --- ...workflow_template_service_client_config.py | 81 - google/cloud/dataproc_v1/proto/__init__.py | 0 .../proto/autoscaling_policies_pb2.py | 1218 ---- .../proto/autoscaling_policies_pb2_grpc.py | 267 - .../cloud/dataproc_v1/proto/clusters_pb2.py | 4713 -------------- .../dataproc_v1/proto/clusters_pb2_grpc.py | 321 - google/cloud/dataproc_v1/proto/jobs_pb2.py | 5403 ---------------- .../cloud/dataproc_v1/proto/jobs_pb2_grpc.py | 356 -- .../cloud/dataproc_v1/proto/operations_pb2.py | 515 -- .../dataproc_v1/proto/operations_pb2_grpc.py | 3 - google/cloud/dataproc_v1/proto/shared_pb2.py | 115 - .../dataproc_v1/proto/shared_pb2_grpc.py | 3 - .../proto/workflow_templates_pb2.py | 3532 ----------- .../proto/workflow_templates_pb2_grpc.py | 400 -- google/cloud/dataproc_v1/py.typed | 2 + google/cloud/dataproc_v1/services/__init__.py | 16 + .../autoscaling_policy_service/__init__.py} | 23 +- .../async_client.py | 553 ++ .../autoscaling_policy_service/client.py | 688 +++ .../autoscaling_policy_service/pagers.py | 152 + .../transports/__init__.py | 38 + .../transports/base.py | 211 + .../transports/grpc.py | 356 ++ .../transports/grpc_asyncio.py | 351 ++ .../services/cluster_controller/__init__.py | 24 + .../cluster_controller/async_client.py | 836 +++ .../services/cluster_controller/client.py | 932 +++ .../services/cluster_controller/pagers.py | 148 + .../cluster_controller/transports/__init__.py | 36 + .../cluster_controller/transports/base.py | 242 + .../cluster_controller/transports/grpc.py | 398 ++ .../transports/grpc_asyncio.py | 395 ++ .../services/job_controller}/__init__.py | 18 +- .../services/job_controller/async_client.py | 722 +++ .../services/job_controller/client.py | 805 +++ .../services/job_controller/pagers.py | 148 + .../job_controller/transports/__init__.py | 36 + .../job_controller/transports/base.py | 260 + .../job_controller/transports/grpc.py | 402 ++ .../job_controller/transports/grpc_asyncio.py | 397 ++ .../workflow_template_service/__init__.py | 24 + .../workflow_template_service/async_client.py | 865 +++ .../workflow_template_service/client.py | 980 +++ .../workflow_template_service/pagers.py | 152 + 
.../transports/__init__.py | 38 + .../transports/base.py | 272 + .../transports/grpc.py | 482 ++ .../transports/grpc_asyncio.py | 478 ++ google/cloud/dataproc_v1/types.py | 68 - google/cloud/dataproc_v1/types/__init__.py | 197 + .../dataproc_v1/types/autoscaling_policies.py | 365 ++ google/cloud/dataproc_v1/types/clusters.py | 1234 ++++ google/cloud/dataproc_v1/types/jobs.py | 1060 ++++ google/cloud/dataproc_v1/types/operations.py | 109 + google/cloud/dataproc_v1/types/shared.py | 37 + .../dataproc_v1/types/workflow_templates.py | 833 +++ google/cloud/dataproc_v1beta2/__init__.py | 225 +- .../cloud/dataproc_v1beta2/gapic/__init__.py | 0 .../autoscaling_policy_service_client.py | 653 -- ...utoscaling_policy_service_client_config.py | 139 - .../gapic/cluster_controller_client.py | 888 --- .../gapic/cluster_controller_client_config.py | 76 - google/cloud/dataproc_v1beta2/gapic/enums.py | 318 - .../gapic/job_controller_client.py | 810 --- .../gapic/job_controller_client_config.py | 149 - .../gapic/transports/__init__.py | 0 ...toscaling_policy_service_grpc_transport.py | 179 - .../cluster_controller_grpc_transport.py | 204 - .../job_controller_grpc_transport.py | 212 - ...orkflow_template_service_grpc_transport.py | 249 - .../gapic/workflow_template_service_client.py | 960 --- ...workflow_template_service_client_config.py | 81 - .../cloud/dataproc_v1beta2/proto/__init__.py | 0 .../proto/autoscaling_policies_pb2.py | 1211 ---- .../proto/autoscaling_policies_pb2_grpc.py | 267 - .../dataproc_v1beta2/proto/clusters_pb2.py | 4828 --------------- .../proto/clusters_pb2_grpc.py | 321 - .../cloud/dataproc_v1beta2/proto/jobs_pb2.py | 5425 ----------------- .../dataproc_v1beta2/proto/jobs_pb2_grpc.py | 356 -- .../dataproc_v1beta2/proto/operations_pb2.py | 501 -- .../proto/operations_pb2_grpc.py | 3 - .../dataproc_v1beta2/proto/shared_pb2.py | 169 - .../dataproc_v1beta2/proto/shared_pb2_grpc.py | 3 - .../proto/workflow_templates_pb2.py | 3551 ----------- .../proto/workflow_templates_pb2_grpc.py | 400 -- google/cloud/dataproc_v1beta2/py.typed | 2 + .../dataproc_v1beta2/services/__init__.py | 16 + .../autoscaling_policy_service/__init__.py | 24 + .../async_client.py | 552 ++ .../autoscaling_policy_service/client.py | 687 +++ .../autoscaling_policy_service/pagers.py | 152 + .../transports/__init__.py | 38 + .../transports/base.py | 211 + .../transports/grpc.py | 356 ++ .../transports/grpc_asyncio.py | 351 ++ .../services/cluster_controller/__init__.py | 24 + .../cluster_controller/async_client.py | 855 +++ .../services/cluster_controller/client.py | 951 +++ .../services/cluster_controller/pagers.py | 148 + .../cluster_controller/transports/__init__.py | 36 + .../cluster_controller/transports/base.py | 242 + .../cluster_controller/transports/grpc.py | 397 ++ .../transports/grpc_asyncio.py | 394 ++ .../services/job_controller}/__init__.py | 18 +- .../services/job_controller/async_client.py | 722 +++ .../services/job_controller/client.py | 805 +++ .../services/job_controller/pagers.py | 148 + .../job_controller/transports/__init__.py | 36 + .../job_controller/transports/base.py | 260 + .../job_controller/transports/grpc.py | 402 ++ .../job_controller/transports/grpc_asyncio.py | 397 ++ .../workflow_template_service/__init__.py | 24 + .../workflow_template_service/async_client.py | 865 +++ .../workflow_template_service/client.py | 980 +++ .../workflow_template_service/pagers.py | 152 + .../transports/__init__.py | 38 + .../transports/base.py | 272 + .../transports/grpc.py | 482 ++ .../transports/grpc_asyncio.py 
| 478 ++ google/cloud/dataproc_v1beta2/types.py | 68 - .../cloud/dataproc_v1beta2/types/__init__.py | 199 + .../types/autoscaling_policies.py | 360 ++ .../cloud/dataproc_v1beta2/types/clusters.py | 1255 ++++ google/cloud/dataproc_v1beta2/types/jobs.py | 1072 ++++ .../dataproc_v1beta2/types/operations.py | 109 + google/cloud/dataproc_v1beta2/types/shared.py | 43 + .../types/workflow_templates.py | 849 +++ mypy.ini | 3 + noxfile.py | 10 +- samples/snippets/create_cluster.py | 32 +- samples/snippets/create_cluster_test.py | 20 +- samples/snippets/dataproc_e2e_donttest.py | 11 +- .../instantiate_inline_workflow_template.py | 68 +- ...stantiate_inline_workflow_template_test.py | 4 +- samples/snippets/list_clusters.py | 43 +- samples/snippets/noxfile.py | 26 +- samples/snippets/pyspark_sort.py | 2 +- samples/snippets/pyspark_sort_gcs.py | 2 +- samples/snippets/quickstart/quickstart.py | 129 +- .../snippets/quickstart/quickstart_test.py | 38 +- samples/snippets/single_job_workflow.py | 52 +- samples/snippets/submit_job_to_cluster.py | 248 +- scripts/fixup_dataproc_v1_keywords.py | 202 + scripts/fixup_dataproc_v1beta2_keywords.py | 202 + setup.py | 20 +- synth.metadata | 7 +- synth.py | 66 +- .../v1/test_system_cluster_controller_v1.py | 5 +- .../test_system_cluster_controller_v1beta2.py | 5 +- tests/unit/gapic/dataproc_v1/__init__.py | 1 + .../test_autoscaling_policy_service.py | 2015 ++++++ .../dataproc_v1/test_cluster_controller.py | 1841 ++++++ .../gapic/dataproc_v1/test_job_controller.py | 1901 ++++++ .../test_workflow_template_service.py | 2488 ++++++++ tests/unit/gapic/dataproc_v1beta2/__init__.py | 1 + .../test_autoscaling_policy_service.py | 2015 ++++++ .../test_cluster_controller.py | 1843 ++++++ .../dataproc_v1beta2/test_job_controller.py | 1927 ++++++ .../test_workflow_template_service.py | 2488 ++++++++ ...st_autoscaling_policy_service_client_v1.py | 273 - .../v1/test_cluster_controller_client_v1.py | 414 -- .../gapic/v1/test_job_controller_client_v1.py | 431 -- ...est_workflow_template_service_client_v1.py | 378 -- ...toscaling_policy_service_client_v1beta2.py | 273 - .../test_cluster_controller_client_v1beta2.py | 413 -- .../test_job_controller_client_v1beta2.py | 441 -- ...orkflow_template_service_client_v1beta2.py | 378 -- 197 files changed, 48833 insertions(+), 47395 deletions(-) create mode 100644 UPGRADING.md create mode 120000 docs/UPGRADING.md create mode 100644 docs/dataproc_v1/services.rst create mode 100644 docs/dataproc_v1/types.rst create mode 100644 docs/dataproc_v1beta2/services.rst create mode 100644 docs/dataproc_v1beta2/types.rst delete mode 100644 docs/gapic/v1/api.rst delete mode 100644 docs/gapic/v1/types.rst delete mode 100644 docs/gapic/v1beta2/api.rst delete mode 100644 docs/gapic/v1beta2/types.rst create mode 100644 google/cloud/dataproc/__init__.py create mode 100644 google/cloud/dataproc/py.typed delete mode 100644 google/cloud/dataproc_v1/gapic/__init__.py delete mode 100644 google/cloud/dataproc_v1/gapic/autoscaling_policy_service_client.py delete mode 100644 google/cloud/dataproc_v1/gapic/autoscaling_policy_service_client_config.py delete mode 100644 google/cloud/dataproc_v1/gapic/cluster_controller_client.py delete mode 100644 google/cloud/dataproc_v1/gapic/cluster_controller_client_config.py delete mode 100644 google/cloud/dataproc_v1/gapic/enums.py delete mode 100644 google/cloud/dataproc_v1/gapic/job_controller_client.py delete mode 100644 google/cloud/dataproc_v1/gapic/job_controller_client_config.py delete mode 100644 
google/cloud/dataproc_v1/gapic/transports/__init__.py delete mode 100644 google/cloud/dataproc_v1/gapic/transports/autoscaling_policy_service_grpc_transport.py delete mode 100644 google/cloud/dataproc_v1/gapic/transports/cluster_controller_grpc_transport.py delete mode 100644 google/cloud/dataproc_v1/gapic/transports/job_controller_grpc_transport.py delete mode 100644 google/cloud/dataproc_v1/gapic/transports/workflow_template_service_grpc_transport.py delete mode 100644 google/cloud/dataproc_v1/gapic/workflow_template_service_client.py delete mode 100644 google/cloud/dataproc_v1/gapic/workflow_template_service_client_config.py delete mode 100644 google/cloud/dataproc_v1/proto/__init__.py delete mode 100644 google/cloud/dataproc_v1/proto/autoscaling_policies_pb2.py delete mode 100644 google/cloud/dataproc_v1/proto/autoscaling_policies_pb2_grpc.py delete mode 100644 google/cloud/dataproc_v1/proto/clusters_pb2.py delete mode 100644 google/cloud/dataproc_v1/proto/clusters_pb2_grpc.py delete mode 100644 google/cloud/dataproc_v1/proto/jobs_pb2.py delete mode 100644 google/cloud/dataproc_v1/proto/jobs_pb2_grpc.py delete mode 100644 google/cloud/dataproc_v1/proto/operations_pb2.py delete mode 100644 google/cloud/dataproc_v1/proto/operations_pb2_grpc.py delete mode 100644 google/cloud/dataproc_v1/proto/shared_pb2.py delete mode 100644 google/cloud/dataproc_v1/proto/shared_pb2_grpc.py delete mode 100644 google/cloud/dataproc_v1/proto/workflow_templates_pb2.py delete mode 100644 google/cloud/dataproc_v1/proto/workflow_templates_pb2_grpc.py create mode 100644 google/cloud/dataproc_v1/py.typed create mode 100644 google/cloud/dataproc_v1/services/__init__.py rename google/cloud/{dataproc.py => dataproc_v1/services/autoscaling_policy_service/__init__.py} (52%) create mode 100644 google/cloud/dataproc_v1/services/autoscaling_policy_service/async_client.py create mode 100644 google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py create mode 100644 google/cloud/dataproc_v1/services/autoscaling_policy_service/pagers.py create mode 100644 google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/__init__.py create mode 100644 google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py create mode 100644 google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py create mode 100644 google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py create mode 100644 google/cloud/dataproc_v1/services/cluster_controller/__init__.py create mode 100644 google/cloud/dataproc_v1/services/cluster_controller/async_client.py create mode 100644 google/cloud/dataproc_v1/services/cluster_controller/client.py create mode 100644 google/cloud/dataproc_v1/services/cluster_controller/pagers.py create mode 100644 google/cloud/dataproc_v1/services/cluster_controller/transports/__init__.py create mode 100644 google/cloud/dataproc_v1/services/cluster_controller/transports/base.py create mode 100644 google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py create mode 100644 google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py rename google/{ => cloud/dataproc_v1/services/job_controller}/__init__.py (71%) create mode 100644 google/cloud/dataproc_v1/services/job_controller/async_client.py create mode 100644 google/cloud/dataproc_v1/services/job_controller/client.py create mode 100644 google/cloud/dataproc_v1/services/job_controller/pagers.py create mode 100644 
google/cloud/dataproc_v1/services/job_controller/transports/__init__.py create mode 100644 google/cloud/dataproc_v1/services/job_controller/transports/base.py create mode 100644 google/cloud/dataproc_v1/services/job_controller/transports/grpc.py create mode 100644 google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py create mode 100644 google/cloud/dataproc_v1/services/workflow_template_service/__init__.py create mode 100644 google/cloud/dataproc_v1/services/workflow_template_service/async_client.py create mode 100644 google/cloud/dataproc_v1/services/workflow_template_service/client.py create mode 100644 google/cloud/dataproc_v1/services/workflow_template_service/pagers.py create mode 100644 google/cloud/dataproc_v1/services/workflow_template_service/transports/__init__.py create mode 100644 google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py create mode 100644 google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py create mode 100644 google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py delete mode 100644 google/cloud/dataproc_v1/types.py create mode 100644 google/cloud/dataproc_v1/types/__init__.py create mode 100644 google/cloud/dataproc_v1/types/autoscaling_policies.py create mode 100644 google/cloud/dataproc_v1/types/clusters.py create mode 100644 google/cloud/dataproc_v1/types/jobs.py create mode 100644 google/cloud/dataproc_v1/types/operations.py create mode 100644 google/cloud/dataproc_v1/types/shared.py create mode 100644 google/cloud/dataproc_v1/types/workflow_templates.py delete mode 100644 google/cloud/dataproc_v1beta2/gapic/__init__.py delete mode 100644 google/cloud/dataproc_v1beta2/gapic/autoscaling_policy_service_client.py delete mode 100644 google/cloud/dataproc_v1beta2/gapic/autoscaling_policy_service_client_config.py delete mode 100644 google/cloud/dataproc_v1beta2/gapic/cluster_controller_client.py delete mode 100644 google/cloud/dataproc_v1beta2/gapic/cluster_controller_client_config.py delete mode 100644 google/cloud/dataproc_v1beta2/gapic/enums.py delete mode 100644 google/cloud/dataproc_v1beta2/gapic/job_controller_client.py delete mode 100644 google/cloud/dataproc_v1beta2/gapic/job_controller_client_config.py delete mode 100644 google/cloud/dataproc_v1beta2/gapic/transports/__init__.py delete mode 100644 google/cloud/dataproc_v1beta2/gapic/transports/autoscaling_policy_service_grpc_transport.py delete mode 100644 google/cloud/dataproc_v1beta2/gapic/transports/cluster_controller_grpc_transport.py delete mode 100644 google/cloud/dataproc_v1beta2/gapic/transports/job_controller_grpc_transport.py delete mode 100644 google/cloud/dataproc_v1beta2/gapic/transports/workflow_template_service_grpc_transport.py delete mode 100644 google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client.py delete mode 100644 google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client_config.py delete mode 100644 google/cloud/dataproc_v1beta2/proto/__init__.py delete mode 100644 google/cloud/dataproc_v1beta2/proto/autoscaling_policies_pb2.py delete mode 100644 google/cloud/dataproc_v1beta2/proto/autoscaling_policies_pb2_grpc.py delete mode 100644 google/cloud/dataproc_v1beta2/proto/clusters_pb2.py delete mode 100644 google/cloud/dataproc_v1beta2/proto/clusters_pb2_grpc.py delete mode 100644 google/cloud/dataproc_v1beta2/proto/jobs_pb2.py delete mode 100644 google/cloud/dataproc_v1beta2/proto/jobs_pb2_grpc.py delete mode 100644 
google/cloud/dataproc_v1beta2/proto/operations_pb2.py delete mode 100644 google/cloud/dataproc_v1beta2/proto/operations_pb2_grpc.py delete mode 100644 google/cloud/dataproc_v1beta2/proto/shared_pb2.py delete mode 100644 google/cloud/dataproc_v1beta2/proto/shared_pb2_grpc.py delete mode 100644 google/cloud/dataproc_v1beta2/proto/workflow_templates_pb2.py delete mode 100644 google/cloud/dataproc_v1beta2/proto/workflow_templates_pb2_grpc.py create mode 100644 google/cloud/dataproc_v1beta2/py.typed create mode 100644 google/cloud/dataproc_v1beta2/services/__init__.py create mode 100644 google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/__init__.py create mode 100644 google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/async_client.py create mode 100644 google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/client.py create mode 100644 google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/pagers.py create mode 100644 google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/__init__.py create mode 100644 google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/base.py create mode 100644 google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc.py create mode 100644 google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc_asyncio.py create mode 100644 google/cloud/dataproc_v1beta2/services/cluster_controller/__init__.py create mode 100644 google/cloud/dataproc_v1beta2/services/cluster_controller/async_client.py create mode 100644 google/cloud/dataproc_v1beta2/services/cluster_controller/client.py create mode 100644 google/cloud/dataproc_v1beta2/services/cluster_controller/pagers.py create mode 100644 google/cloud/dataproc_v1beta2/services/cluster_controller/transports/__init__.py create mode 100644 google/cloud/dataproc_v1beta2/services/cluster_controller/transports/base.py create mode 100644 google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc.py create mode 100644 google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc_asyncio.py rename google/cloud/{ => dataproc_v1beta2/services/job_controller}/__init__.py (71%) create mode 100644 google/cloud/dataproc_v1beta2/services/job_controller/async_client.py create mode 100644 google/cloud/dataproc_v1beta2/services/job_controller/client.py create mode 100644 google/cloud/dataproc_v1beta2/services/job_controller/pagers.py create mode 100644 google/cloud/dataproc_v1beta2/services/job_controller/transports/__init__.py create mode 100644 google/cloud/dataproc_v1beta2/services/job_controller/transports/base.py create mode 100644 google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc.py create mode 100644 google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc_asyncio.py create mode 100644 google/cloud/dataproc_v1beta2/services/workflow_template_service/__init__.py create mode 100644 google/cloud/dataproc_v1beta2/services/workflow_template_service/async_client.py create mode 100644 google/cloud/dataproc_v1beta2/services/workflow_template_service/client.py create mode 100644 google/cloud/dataproc_v1beta2/services/workflow_template_service/pagers.py create mode 100644 google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/__init__.py create mode 100644 google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/base.py create mode 100644 google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc.py 
create mode 100644 google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc_asyncio.py delete mode 100644 google/cloud/dataproc_v1beta2/types.py create mode 100644 google/cloud/dataproc_v1beta2/types/__init__.py create mode 100644 google/cloud/dataproc_v1beta2/types/autoscaling_policies.py create mode 100644 google/cloud/dataproc_v1beta2/types/clusters.py create mode 100644 google/cloud/dataproc_v1beta2/types/jobs.py create mode 100644 google/cloud/dataproc_v1beta2/types/operations.py create mode 100644 google/cloud/dataproc_v1beta2/types/shared.py create mode 100644 google/cloud/dataproc_v1beta2/types/workflow_templates.py create mode 100644 mypy.ini create mode 100644 scripts/fixup_dataproc_v1_keywords.py create mode 100644 scripts/fixup_dataproc_v1beta2_keywords.py create mode 100644 tests/unit/gapic/dataproc_v1/__init__.py create mode 100644 tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py create mode 100644 tests/unit/gapic/dataproc_v1/test_cluster_controller.py create mode 100644 tests/unit/gapic/dataproc_v1/test_job_controller.py create mode 100644 tests/unit/gapic/dataproc_v1/test_workflow_template_service.py create mode 100644 tests/unit/gapic/dataproc_v1beta2/__init__.py create mode 100644 tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py create mode 100644 tests/unit/gapic/dataproc_v1beta2/test_cluster_controller.py create mode 100644 tests/unit/gapic/dataproc_v1beta2/test_job_controller.py create mode 100644 tests/unit/gapic/dataproc_v1beta2/test_workflow_template_service.py delete mode 100644 tests/unit/gapic/v1/test_autoscaling_policy_service_client_v1.py delete mode 100644 tests/unit/gapic/v1/test_cluster_controller_client_v1.py delete mode 100644 tests/unit/gapic/v1/test_job_controller_client_v1.py delete mode 100644 tests/unit/gapic/v1/test_workflow_template_service_client_v1.py delete mode 100644 tests/unit/gapic/v1beta2/test_autoscaling_policy_service_client_v1beta2.py delete mode 100644 tests/unit/gapic/v1beta2/test_cluster_controller_client_v1beta2.py delete mode 100644 tests/unit/gapic/v1beta2/test_job_controller_client_v1beta2.py delete mode 100644 tests/unit/gapic/v1beta2/test_workflow_template_service_client_v1beta2.py diff --git a/.coveragerc b/.coveragerc index dd39c854..218f2ddf 100644 --- a/.coveragerc +++ b/.coveragerc @@ -21,15 +21,14 @@ branch = True [report] fail_under = 100 show_missing = True +omit = google/cloud/dataproc/__init__.py exclude_lines = # Re-enable the standard pragma pragma: NO COVER # Ignore debug-only repr def __repr__ - # Ignore abstract methods - raise NotImplementedError -omit = - */gapic/*.py - */proto/*.py - */core/*.py - */site-packages/*.py \ No newline at end of file + # Ignore pkg_resources exceptions. + # This is added at the module level as a safeguard for if someone + # generates the code and tries to run it without pip installing. This + # makes it virtually impossible to test properly. + except pkg_resources.DistributionNotFound \ No newline at end of file diff --git a/README.rst b/README.rst index d8b90c17..bc86bd61 100644 --- a/README.rst +++ b/README.rst @@ -1,7 +1,7 @@ Python Client for Google Cloud Dataproc API =========================================== -|ga| |pypi| |versions| +|ga| |pypi| |versions| `Google Cloud Dataproc API`_: Manages Hadoop-based clusters and jobs on Google Cloud Platform. @@ -49,11 +49,13 @@ dependencies. 
Supported Python Versions ^^^^^^^^^^^^^^^^^^^^^^^^^ -Python >= 3.5 +Python >= 3.6 Deprecated Python Versions ^^^^^^^^^^^^^^^^^^^^^^^^^^ -Python == 2.7. Python 2.7 support will be removed on January 1, 2020. +Python == 2.7. + +The last version of this library compatible with Python 2.7 is google-cloud-dataproc==1.1.0. Mac/Linux @@ -107,4 +109,4 @@ Next Steps - Read the `Client Library Documentation`_ for Google Cloud Dataproc API API to see other available methods on the client. - Read the `Product documentation`_ to learn more about the product and see - How-to Guides. + How-to Guides. \ No newline at end of file diff --git a/UPGRADING.md b/UPGRADING.md new file mode 100644 index 00000000..28d1f396 --- /dev/null +++ b/UPGRADING.md @@ -0,0 +1,152 @@ +# 2.0.0 Migration Guide + +The 2.0 release of the `google-cloud-dataproc` client is a significant upgrade based on a [next-gen code generator](https://github.com/googleapis/gapic-generator-python), and includes substantial interface changes. Existing code written for earlier versions of this library will likely require updates to use this version. This document describes the changes that have been made, and what you need to do to update your usage. + +If you experience issues or have questions, please file an [issue](https://github.com/googleapis/python-dataproc/issues). + +## Supported Python Versions + +> **WARNING**: Breaking change + +The 2.0.0 release requires Python 3.6+. + + +## Method Calls + +> **WARNING**: Breaking change + +Methods expect request objects. We provide a script that will convert most common use cases. + +* Install the library + +```py +python3 -m pip install google-cloud-dataproc +``` + +* The script `fixup_dataproc_v1_keywords.py` is shipped with the library. It expects an input directory (with the code to convert) and an empty destination directory. + +```sh +$ fixup_dataproc_v1_keywords.py --input-directory .samples/ --output-directory samples/ +``` + +**Before:** +```py +from google.cloud import dataproc + +client = dataproc.ClusterControllerClient() + +clusters = client.list_clusters(project_id="project_id", region="region") +``` + + +**After:** +```py +from google.cloud import dataproc + +client = dataproc.ClusterControllerClient() + +clusters = client.list_clusters(request={ + 'project_id' : "project_id", 'region' : "region" +}) +``` + +### More Details + +In `google-cloud-dataproc<2.0.0`, parameters required by the API were positional parameters and optional parameters were keyword parameters. + +**Before:** +```py + def get_cluster( + self, + project_id, + region, + cluster_name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): +``` + +In the 2.0.0 release, all methods have a single positional parameter `request`. Method docstrings indicate whether a parameter is required or optional. + +Some methods have additional keyword only parameters. The available parameters depend on the [`google.api.method_signature` annotation](https://github.com/googleapis/googleapis/blob/master/google/cloud/dataproc/v1/clusters.proto#L88) specified by the API producer. 
+ + +**After:** +```py + def get_cluster( + self, + request: clusters.GetClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> clusters.Cluster: +``` + +> **NOTE:** The `request` parameter and flattened keyword parameters for the API are mutually exclusive. +> Passing both will result in an error. + + +Both of these calls are valid: + +```py +response = client.get_cluster( + request={ + "project_id": project_id, + "region": region, + "cluster_name": cluster_name + } +) +``` + +```py +response = client.get_cluster( + project_id=project_id, + region=region, + cluster_name=cluster_name +) +``` + +This call is invalid because it mixes `request` with a keyword argument `cluster_name`. Executing this code +will result in an error. + +```py +response = client.get_cluster( + request={ + "project_id": project_id, + "region": region + }, + cluster_name=cluster_name +) +``` + + + +## Enums and Types + + +> **WARNING**: Breaking change + +The submodules `enums` and `types` have been removed. + +**Before:** +```py + +from google.cloud import dataproc + +status = dataproc.enums.ClusterStatus.State.CREATING +cluster = dataproc.types.Cluster(cluster_name="name") +``` + + +**After:** +```py +from google.cloud import dataproc + +status = dataproc.ClusterStatus.State.CREATING +cluster = dataproc.Cluster(cluster_name="name") +``` \ No newline at end of file diff --git a/docs/UPGRADING.md b/docs/UPGRADING.md new file mode 120000 index 00000000..01097c8c --- /dev/null +++ b/docs/UPGRADING.md @@ -0,0 +1 @@ +../UPGRADING.md \ No newline at end of file diff --git a/docs/dataproc_v1/services.rst b/docs/dataproc_v1/services.rst new file mode 100644 index 00000000..8c7fc841 --- /dev/null +++ b/docs/dataproc_v1/services.rst @@ -0,0 +1,15 @@ +Services for Google Cloud Dataproc v1 API +========================================= + +.. automodule:: google.cloud.dataproc_v1.services.autoscaling_policy_service + :members: + :inherited-members: +.. automodule:: google.cloud.dataproc_v1.services.cluster_controller + :members: + :inherited-members: +.. automodule:: google.cloud.dataproc_v1.services.job_controller + :members: + :inherited-members: +.. automodule:: google.cloud.dataproc_v1.services.workflow_template_service + :members: + :inherited-members: diff --git a/docs/dataproc_v1/types.rst b/docs/dataproc_v1/types.rst new file mode 100644 index 00000000..5cd2ad4b --- /dev/null +++ b/docs/dataproc_v1/types.rst @@ -0,0 +1,5 @@ +Types for Google Cloud Dataproc v1 API +====================================== + +.. automodule:: google.cloud.dataproc_v1.types + :members: diff --git a/docs/dataproc_v1beta2/services.rst b/docs/dataproc_v1beta2/services.rst new file mode 100644 index 00000000..273b2def --- /dev/null +++ b/docs/dataproc_v1beta2/services.rst @@ -0,0 +1,15 @@ +Services for Google Cloud Dataproc v1beta2 API +============================================== + +.. automodule:: google.cloud.dataproc_v1beta2.services.autoscaling_policy_service + :members: + :inherited-members: +.. automodule:: google.cloud.dataproc_v1beta2.services.cluster_controller + :members: + :inherited-members: +.. automodule:: google.cloud.dataproc_v1beta2.services.job_controller + :members: + :inherited-members: +.. 
automodule:: google.cloud.dataproc_v1beta2.services.workflow_template_service + :members: + :inherited-members: diff --git a/docs/dataproc_v1beta2/types.rst b/docs/dataproc_v1beta2/types.rst new file mode 100644 index 00000000..e0972271 --- /dev/null +++ b/docs/dataproc_v1beta2/types.rst @@ -0,0 +1,5 @@ +Types for Google Cloud Dataproc v1beta2 API +=========================================== + +.. automodule:: google.cloud.dataproc_v1beta2.types + :members: diff --git a/docs/gapic/v1/api.rst b/docs/gapic/v1/api.rst deleted file mode 100644 index 450cfac4..00000000 --- a/docs/gapic/v1/api.rst +++ /dev/null @@ -1,6 +0,0 @@ -Client for Cloud Dataproc API -============================= - -.. automodule:: google.cloud.dataproc_v1 - :members: - :inherited-members: \ No newline at end of file diff --git a/docs/gapic/v1/types.rst b/docs/gapic/v1/types.rst deleted file mode 100644 index 3e11e932..00000000 --- a/docs/gapic/v1/types.rst +++ /dev/null @@ -1,5 +0,0 @@ -Types for Cloud Dataproc API Client -=================================== - -.. automodule:: google.cloud.dataproc_v1.types - :members: \ No newline at end of file diff --git a/docs/gapic/v1beta2/api.rst b/docs/gapic/v1beta2/api.rst deleted file mode 100644 index 986bc35e..00000000 --- a/docs/gapic/v1beta2/api.rst +++ /dev/null @@ -1,6 +0,0 @@ -Client for Cloud Dataproc API -============================= - -.. automodule:: google.cloud.dataproc_v1beta2 - :members: - :inherited-members: \ No newline at end of file diff --git a/docs/gapic/v1beta2/types.rst b/docs/gapic/v1beta2/types.rst deleted file mode 100644 index 636f2da5..00000000 --- a/docs/gapic/v1beta2/types.rst +++ /dev/null @@ -1,5 +0,0 @@ -Types for Cloud Dataproc API Client -=================================== - -.. automodule:: google.cloud.dataproc_v1beta2.types - :members: \ No newline at end of file diff --git a/docs/index.rst b/docs/index.rst index ed4ee560..9be413b6 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -7,11 +7,20 @@ API Reference .. toctree:: :maxdepth: 2 - gapic/v1/api - gapic/v1/types - gapic/v1beta2/api - gapic/v1beta2/types + dataproc_v1/services + dataproc_v1/types + dataproc_v1beta2/services + dataproc_v1beta2/types +Migration Guide +--------------- + +See the guide below for instructions on migrating to the 2.x release of this library. + +.. toctree:: + :maxdepth: 2 + + UPGRADING Changelog --------- @@ -21,4 +30,4 @@ For a list of all ``google-cloud-dataproc`` releases: .. toctree:: :maxdepth: 2 - changelog + changelog \ No newline at end of file diff --git a/google/cloud/dataproc/__init__.py b/google/cloud/dataproc/__init__.py new file mode 100644 index 00000000..7ed0e897 --- /dev/null +++ b/google/cloud/dataproc/__init__.py @@ -0,0 +1,250 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.cloud.dataproc_v1.services.autoscaling_policy_service.async_client import ( + AutoscalingPolicyServiceAsyncClient, +) +from google.cloud.dataproc_v1.services.autoscaling_policy_service.client import ( + AutoscalingPolicyServiceClient, +) +from google.cloud.dataproc_v1.services.cluster_controller.async_client import ( + ClusterControllerAsyncClient, +) +from google.cloud.dataproc_v1.services.cluster_controller.client import ( + ClusterControllerClient, +) +from google.cloud.dataproc_v1.services.job_controller.async_client import ( + JobControllerAsyncClient, +) +from google.cloud.dataproc_v1.services.job_controller.client import JobControllerClient +from google.cloud.dataproc_v1.services.workflow_template_service.async_client import ( + WorkflowTemplateServiceAsyncClient, +) +from google.cloud.dataproc_v1.services.workflow_template_service.client import ( + WorkflowTemplateServiceClient, +) +from google.cloud.dataproc_v1.types.autoscaling_policies import AutoscalingPolicy +from google.cloud.dataproc_v1.types.autoscaling_policies import ( + BasicAutoscalingAlgorithm, +) +from google.cloud.dataproc_v1.types.autoscaling_policies import ( + BasicYarnAutoscalingConfig, +) +from google.cloud.dataproc_v1.types.autoscaling_policies import ( + CreateAutoscalingPolicyRequest, +) +from google.cloud.dataproc_v1.types.autoscaling_policies import ( + DeleteAutoscalingPolicyRequest, +) +from google.cloud.dataproc_v1.types.autoscaling_policies import ( + GetAutoscalingPolicyRequest, +) +from google.cloud.dataproc_v1.types.autoscaling_policies import ( + InstanceGroupAutoscalingPolicyConfig, +) +from google.cloud.dataproc_v1.types.autoscaling_policies import ( + ListAutoscalingPoliciesRequest, +) +from google.cloud.dataproc_v1.types.autoscaling_policies import ( + ListAutoscalingPoliciesResponse, +) +from google.cloud.dataproc_v1.types.autoscaling_policies import ( + UpdateAutoscalingPolicyRequest, +) +from google.cloud.dataproc_v1.types.clusters import AcceleratorConfig +from google.cloud.dataproc_v1.types.clusters import AutoscalingConfig +from google.cloud.dataproc_v1.types.clusters import Cluster +from google.cloud.dataproc_v1.types.clusters import ClusterConfig +from google.cloud.dataproc_v1.types.clusters import ClusterMetrics +from google.cloud.dataproc_v1.types.clusters import ClusterStatus +from google.cloud.dataproc_v1.types.clusters import CreateClusterRequest +from google.cloud.dataproc_v1.types.clusters import DeleteClusterRequest +from google.cloud.dataproc_v1.types.clusters import DiagnoseClusterRequest +from google.cloud.dataproc_v1.types.clusters import DiagnoseClusterResults +from google.cloud.dataproc_v1.types.clusters import DiskConfig +from google.cloud.dataproc_v1.types.clusters import EncryptionConfig +from google.cloud.dataproc_v1.types.clusters import EndpointConfig +from google.cloud.dataproc_v1.types.clusters import GceClusterConfig +from google.cloud.dataproc_v1.types.clusters import GetClusterRequest +from google.cloud.dataproc_v1.types.clusters import InstanceGroupConfig +from google.cloud.dataproc_v1.types.clusters import KerberosConfig +from google.cloud.dataproc_v1.types.clusters import LifecycleConfig +from google.cloud.dataproc_v1.types.clusters import ListClustersRequest +from google.cloud.dataproc_v1.types.clusters import ListClustersResponse +from google.cloud.dataproc_v1.types.clusters import ManagedGroupConfig +from google.cloud.dataproc_v1.types.clusters import NodeInitializationAction +from google.cloud.dataproc_v1.types.clusters import 
ReservationAffinity +from google.cloud.dataproc_v1.types.clusters import SecurityConfig +from google.cloud.dataproc_v1.types.clusters import SoftwareConfig +from google.cloud.dataproc_v1.types.clusters import UpdateClusterRequest +from google.cloud.dataproc_v1.types.jobs import CancelJobRequest +from google.cloud.dataproc_v1.types.jobs import DeleteJobRequest +from google.cloud.dataproc_v1.types.jobs import GetJobRequest +from google.cloud.dataproc_v1.types.jobs import HadoopJob +from google.cloud.dataproc_v1.types.jobs import HiveJob +from google.cloud.dataproc_v1.types.jobs import Job +from google.cloud.dataproc_v1.types.jobs import JobMetadata +from google.cloud.dataproc_v1.types.jobs import JobPlacement +from google.cloud.dataproc_v1.types.jobs import JobReference +from google.cloud.dataproc_v1.types.jobs import JobScheduling +from google.cloud.dataproc_v1.types.jobs import JobStatus +from google.cloud.dataproc_v1.types.jobs import ListJobsRequest +from google.cloud.dataproc_v1.types.jobs import ListJobsResponse +from google.cloud.dataproc_v1.types.jobs import LoggingConfig +from google.cloud.dataproc_v1.types.jobs import PigJob +from google.cloud.dataproc_v1.types.jobs import PrestoJob +from google.cloud.dataproc_v1.types.jobs import PySparkJob +from google.cloud.dataproc_v1.types.jobs import QueryList +from google.cloud.dataproc_v1.types.jobs import SparkJob +from google.cloud.dataproc_v1.types.jobs import SparkRJob +from google.cloud.dataproc_v1.types.jobs import SparkSqlJob +from google.cloud.dataproc_v1.types.jobs import SubmitJobRequest +from google.cloud.dataproc_v1.types.jobs import UpdateJobRequest +from google.cloud.dataproc_v1.types.jobs import YarnApplication +from google.cloud.dataproc_v1.types.operations import ClusterOperationMetadata +from google.cloud.dataproc_v1.types.operations import ClusterOperationStatus +from google.cloud.dataproc_v1.types.shared import Component +from google.cloud.dataproc_v1.types.workflow_templates import ClusterOperation +from google.cloud.dataproc_v1.types.workflow_templates import ClusterSelector +from google.cloud.dataproc_v1.types.workflow_templates import ( + CreateWorkflowTemplateRequest, +) +from google.cloud.dataproc_v1.types.workflow_templates import ( + DeleteWorkflowTemplateRequest, +) +from google.cloud.dataproc_v1.types.workflow_templates import GetWorkflowTemplateRequest +from google.cloud.dataproc_v1.types.workflow_templates import ( + InstantiateInlineWorkflowTemplateRequest, +) +from google.cloud.dataproc_v1.types.workflow_templates import ( + InstantiateWorkflowTemplateRequest, +) +from google.cloud.dataproc_v1.types.workflow_templates import ( + ListWorkflowTemplatesRequest, +) +from google.cloud.dataproc_v1.types.workflow_templates import ( + ListWorkflowTemplatesResponse, +) +from google.cloud.dataproc_v1.types.workflow_templates import ManagedCluster +from google.cloud.dataproc_v1.types.workflow_templates import OrderedJob +from google.cloud.dataproc_v1.types.workflow_templates import ParameterValidation +from google.cloud.dataproc_v1.types.workflow_templates import RegexValidation +from google.cloud.dataproc_v1.types.workflow_templates import TemplateParameter +from google.cloud.dataproc_v1.types.workflow_templates import ( + UpdateWorkflowTemplateRequest, +) +from google.cloud.dataproc_v1.types.workflow_templates import ValueValidation +from google.cloud.dataproc_v1.types.workflow_templates import WorkflowGraph +from google.cloud.dataproc_v1.types.workflow_templates import WorkflowMetadata +from 
google.cloud.dataproc_v1.types.workflow_templates import WorkflowNode +from google.cloud.dataproc_v1.types.workflow_templates import WorkflowTemplate +from google.cloud.dataproc_v1.types.workflow_templates import WorkflowTemplatePlacement + +__all__ = ( + "AcceleratorConfig", + "AutoscalingConfig", + "AutoscalingPolicy", + "AutoscalingPolicyServiceAsyncClient", + "AutoscalingPolicyServiceClient", + "BasicAutoscalingAlgorithm", + "BasicYarnAutoscalingConfig", + "CancelJobRequest", + "Cluster", + "ClusterConfig", + "ClusterControllerAsyncClient", + "ClusterControllerClient", + "ClusterMetrics", + "ClusterOperation", + "ClusterOperationMetadata", + "ClusterOperationStatus", + "ClusterSelector", + "ClusterStatus", + "Component", + "CreateAutoscalingPolicyRequest", + "CreateClusterRequest", + "CreateWorkflowTemplateRequest", + "DeleteAutoscalingPolicyRequest", + "DeleteClusterRequest", + "DeleteJobRequest", + "DeleteWorkflowTemplateRequest", + "DiagnoseClusterRequest", + "DiagnoseClusterResults", + "DiskConfig", + "EncryptionConfig", + "EndpointConfig", + "GceClusterConfig", + "GetAutoscalingPolicyRequest", + "GetClusterRequest", + "GetJobRequest", + "GetWorkflowTemplateRequest", + "HadoopJob", + "HiveJob", + "InstanceGroupAutoscalingPolicyConfig", + "InstanceGroupConfig", + "InstantiateInlineWorkflowTemplateRequest", + "InstantiateWorkflowTemplateRequest", + "Job", + "JobControllerAsyncClient", + "JobControllerClient", + "JobMetadata", + "JobPlacement", + "JobReference", + "JobScheduling", + "JobStatus", + "KerberosConfig", + "LifecycleConfig", + "ListAutoscalingPoliciesRequest", + "ListAutoscalingPoliciesResponse", + "ListClustersRequest", + "ListClustersResponse", + "ListJobsRequest", + "ListJobsResponse", + "ListWorkflowTemplatesRequest", + "ListWorkflowTemplatesResponse", + "LoggingConfig", + "ManagedCluster", + "ManagedGroupConfig", + "NodeInitializationAction", + "OrderedJob", + "ParameterValidation", + "PigJob", + "PrestoJob", + "PySparkJob", + "QueryList", + "RegexValidation", + "ReservationAffinity", + "SecurityConfig", + "SoftwareConfig", + "SparkJob", + "SparkRJob", + "SparkSqlJob", + "SubmitJobRequest", + "TemplateParameter", + "UpdateAutoscalingPolicyRequest", + "UpdateClusterRequest", + "UpdateJobRequest", + "UpdateWorkflowTemplateRequest", + "ValueValidation", + "WorkflowGraph", + "WorkflowMetadata", + "WorkflowNode", + "WorkflowTemplate", + "WorkflowTemplatePlacement", + "WorkflowTemplateServiceAsyncClient", + "WorkflowTemplateServiceClient", + "YarnApplication", +) diff --git a/google/cloud/dataproc/py.typed b/google/cloud/dataproc/py.typed new file mode 100644 index 00000000..aac99cba --- /dev/null +++ b/google/cloud/dataproc/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-dataproc package uses inline types. diff --git a/google/cloud/dataproc_v1/__init__.py b/google/cloud/dataproc_v1/__init__.py index 5a422a47..82d780ab 100644 --- a/google/cloud/dataproc_v1/__init__.py +++ b/google/cloud/dataproc_v1/__init__.py @@ -1,70 +1,197 @@ # -*- coding: utf-8 -*- -# + # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# https://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# - -from __future__ import absolute_import -import sys -import warnings - -from google.cloud.dataproc_v1 import types -from google.cloud.dataproc_v1.gapic import autoscaling_policy_service_client -from google.cloud.dataproc_v1.gapic import cluster_controller_client -from google.cloud.dataproc_v1.gapic import enums -from google.cloud.dataproc_v1.gapic import job_controller_client -from google.cloud.dataproc_v1.gapic import workflow_template_service_client - - -if sys.version_info[:2] == (2, 7): - message = ( - "A future version of this library will drop support for Python 2.7. " - "More details about Python 2 support for Google Cloud Client Libraries " - "can be found at https://cloud.google.com/python/docs/python2-sunset/" - ) - warnings.warn(message, DeprecationWarning) - - -class AutoscalingPolicyServiceClient( - autoscaling_policy_service_client.AutoscalingPolicyServiceClient -): - __doc__ = autoscaling_policy_service_client.AutoscalingPolicyServiceClient.__doc__ - enums = enums - - -class ClusterControllerClient(cluster_controller_client.ClusterControllerClient): - __doc__ = cluster_controller_client.ClusterControllerClient.__doc__ - enums = enums - - -class JobControllerClient(job_controller_client.JobControllerClient): - __doc__ = job_controller_client.JobControllerClient.__doc__ - enums = enums - - -class WorkflowTemplateServiceClient( - workflow_template_service_client.WorkflowTemplateServiceClient -): - __doc__ = workflow_template_service_client.WorkflowTemplateServiceClient.__doc__ - enums = enums +from .services.autoscaling_policy_service import AutoscalingPolicyServiceClient +from .services.cluster_controller import ClusterControllerClient +from .services.job_controller import JobControllerClient +from .services.workflow_template_service import WorkflowTemplateServiceClient +from .types.autoscaling_policies import AutoscalingPolicy +from .types.autoscaling_policies import BasicAutoscalingAlgorithm +from .types.autoscaling_policies import BasicYarnAutoscalingConfig +from .types.autoscaling_policies import CreateAutoscalingPolicyRequest +from .types.autoscaling_policies import DeleteAutoscalingPolicyRequest +from .types.autoscaling_policies import GetAutoscalingPolicyRequest +from .types.autoscaling_policies import InstanceGroupAutoscalingPolicyConfig +from .types.autoscaling_policies import ListAutoscalingPoliciesRequest +from .types.autoscaling_policies import ListAutoscalingPoliciesResponse +from .types.autoscaling_policies import UpdateAutoscalingPolicyRequest +from .types.clusters import AcceleratorConfig +from .types.clusters import AutoscalingConfig +from .types.clusters import Cluster +from .types.clusters import ClusterConfig +from .types.clusters import ClusterMetrics +from .types.clusters import ClusterStatus +from .types.clusters import CreateClusterRequest +from .types.clusters import DeleteClusterRequest +from .types.clusters import DiagnoseClusterRequest +from .types.clusters import DiagnoseClusterResults +from .types.clusters import DiskConfig +from .types.clusters import EncryptionConfig +from .types.clusters 
import EndpointConfig +from .types.clusters import GceClusterConfig +from .types.clusters import GetClusterRequest +from .types.clusters import InstanceGroupConfig +from .types.clusters import KerberosConfig +from .types.clusters import LifecycleConfig +from .types.clusters import ListClustersRequest +from .types.clusters import ListClustersResponse +from .types.clusters import ManagedGroupConfig +from .types.clusters import NodeInitializationAction +from .types.clusters import ReservationAffinity +from .types.clusters import SecurityConfig +from .types.clusters import SoftwareConfig +from .types.clusters import UpdateClusterRequest +from .types.jobs import CancelJobRequest +from .types.jobs import DeleteJobRequest +from .types.jobs import GetJobRequest +from .types.jobs import HadoopJob +from .types.jobs import HiveJob +from .types.jobs import Job +from .types.jobs import JobMetadata +from .types.jobs import JobPlacement +from .types.jobs import JobReference +from .types.jobs import JobScheduling +from .types.jobs import JobStatus +from .types.jobs import ListJobsRequest +from .types.jobs import ListJobsResponse +from .types.jobs import LoggingConfig +from .types.jobs import PigJob +from .types.jobs import PrestoJob +from .types.jobs import PySparkJob +from .types.jobs import QueryList +from .types.jobs import SparkJob +from .types.jobs import SparkRJob +from .types.jobs import SparkSqlJob +from .types.jobs import SubmitJobRequest +from .types.jobs import UpdateJobRequest +from .types.jobs import YarnApplication +from .types.operations import ClusterOperationMetadata +from .types.operations import ClusterOperationStatus +from .types.shared import Component +from .types.workflow_templates import ClusterOperation +from .types.workflow_templates import ClusterSelector +from .types.workflow_templates import CreateWorkflowTemplateRequest +from .types.workflow_templates import DeleteWorkflowTemplateRequest +from .types.workflow_templates import GetWorkflowTemplateRequest +from .types.workflow_templates import InstantiateInlineWorkflowTemplateRequest +from .types.workflow_templates import InstantiateWorkflowTemplateRequest +from .types.workflow_templates import ListWorkflowTemplatesRequest +from .types.workflow_templates import ListWorkflowTemplatesResponse +from .types.workflow_templates import ManagedCluster +from .types.workflow_templates import OrderedJob +from .types.workflow_templates import ParameterValidation +from .types.workflow_templates import RegexValidation +from .types.workflow_templates import TemplateParameter +from .types.workflow_templates import UpdateWorkflowTemplateRequest +from .types.workflow_templates import ValueValidation +from .types.workflow_templates import WorkflowGraph +from .types.workflow_templates import WorkflowMetadata +from .types.workflow_templates import WorkflowNode +from .types.workflow_templates import WorkflowTemplate +from .types.workflow_templates import WorkflowTemplatePlacement __all__ = ( - "enums", - "types", + "AcceleratorConfig", + "AutoscalingConfig", + "AutoscalingPolicy", "AutoscalingPolicyServiceClient", + "BasicAutoscalingAlgorithm", + "BasicYarnAutoscalingConfig", + "CancelJobRequest", + "Cluster", + "ClusterConfig", "ClusterControllerClient", + "ClusterMetrics", + "ClusterOperation", + "ClusterOperationMetadata", + "ClusterOperationStatus", + "ClusterSelector", + "ClusterStatus", + "Component", + "CreateAutoscalingPolicyRequest", + "CreateClusterRequest", + "CreateWorkflowTemplateRequest", + "DeleteAutoscalingPolicyRequest", + 
"DeleteClusterRequest", + "DeleteJobRequest", + "DeleteWorkflowTemplateRequest", + "DiagnoseClusterRequest", + "DiagnoseClusterResults", + "DiskConfig", + "EncryptionConfig", + "EndpointConfig", + "GceClusterConfig", + "GetAutoscalingPolicyRequest", + "GetClusterRequest", + "GetJobRequest", + "GetWorkflowTemplateRequest", + "HadoopJob", + "HiveJob", + "InstanceGroupAutoscalingPolicyConfig", + "InstanceGroupConfig", + "InstantiateInlineWorkflowTemplateRequest", + "InstantiateWorkflowTemplateRequest", + "Job", "JobControllerClient", + "JobMetadata", + "JobPlacement", + "JobReference", + "JobScheduling", + "JobStatus", + "KerberosConfig", + "LifecycleConfig", + "ListAutoscalingPoliciesRequest", + "ListAutoscalingPoliciesResponse", + "ListClustersRequest", + "ListClustersResponse", + "ListJobsRequest", + "ListJobsResponse", + "ListWorkflowTemplatesRequest", + "ListWorkflowTemplatesResponse", + "LoggingConfig", + "ManagedCluster", + "ManagedGroupConfig", + "NodeInitializationAction", + "OrderedJob", + "ParameterValidation", + "PigJob", + "PrestoJob", + "PySparkJob", + "QueryList", + "RegexValidation", + "ReservationAffinity", + "SecurityConfig", + "SoftwareConfig", + "SparkJob", + "SparkRJob", + "SparkSqlJob", + "SubmitJobRequest", + "TemplateParameter", + "UpdateAutoscalingPolicyRequest", + "UpdateClusterRequest", + "UpdateJobRequest", + "UpdateWorkflowTemplateRequest", + "ValueValidation", + "WorkflowGraph", + "WorkflowMetadata", + "WorkflowNode", + "WorkflowTemplate", + "WorkflowTemplatePlacement", + "YarnApplication", "WorkflowTemplateServiceClient", ) diff --git a/google/cloud/dataproc_v1/gapic/__init__.py b/google/cloud/dataproc_v1/gapic/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/google/cloud/dataproc_v1/gapic/autoscaling_policy_service_client.py b/google/cloud/dataproc_v1/gapic/autoscaling_policy_service_client.py deleted file mode 100644 index c1627925..00000000 --- a/google/cloud/dataproc_v1/gapic/autoscaling_policy_service_client.py +++ /dev/null @@ -1,653 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Accesses the google.cloud.dataproc.v1 AutoscalingPolicyService API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.page_iterator -import google.api_core.path_template -import grpc - -from google.cloud.dataproc_v1.gapic import autoscaling_policy_service_client_config -from google.cloud.dataproc_v1.gapic import enums -from google.cloud.dataproc_v1.gapic.transports import ( - autoscaling_policy_service_grpc_transport, -) -from google.cloud.dataproc_v1.proto import autoscaling_policies_pb2 -from google.cloud.dataproc_v1.proto import autoscaling_policies_pb2_grpc -from google.protobuf import empty_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-dataproc", -).version - - -class AutoscalingPolicyServiceClient(object): - """ - The API interface for managing autoscaling policies in the - Dataproc API. - """ - - SERVICE_ADDRESS = "dataproc.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.cloud.dataproc.v1.AutoscalingPolicyService" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - AutoscalingPolicyServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def autoscaling_policy_path(cls, project, location, autoscaling_policy): - """Return a fully-qualified autoscaling_policy string.""" - return google.api_core.path_template.expand( - "projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}", - project=project, - location=location, - autoscaling_policy=autoscaling_policy, - ) - - @classmethod - def location_path(cls, project, location): - """Return a fully-qualified location string.""" - return google.api_core.path_template.expand( - "projects/{project}/locations/{location}", - project=project, - location=location, - ) - - @classmethod - def region_path(cls, project, region): - """Return a fully-qualified region string.""" - return google.api_core.path_template.expand( - "projects/{project}/regions/{region}", project=project, region=region, - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.AutoscalingPolicyServiceGrpcTransport, - Callable[[~.Credentials, type], ~.AutoscalingPolicyServiceGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. 
Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. - if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = autoscaling_policy_service_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=autoscaling_policy_service_grpc_transport.AutoscalingPolicyServiceGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = autoscaling_policy_service_grpc_transport.AutoscalingPolicyServiceGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials, - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME], - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. 
- self._inner_api_calls = {} - - # Service calls - def create_autoscaling_policy( - self, - parent, - policy, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates new autoscaling policy. - - Example: - >>> from google.cloud import dataproc_v1 - >>> - >>> client = dataproc_v1.AutoscalingPolicyServiceClient() - >>> - >>> parent = client.region_path('[PROJECT]', '[REGION]') - >>> - >>> # TODO: Initialize `policy`: - >>> policy = {} - >>> - >>> response = client.create_autoscaling_policy(parent, policy) - - Args: - parent (str): Required. The "resource name" of the region or location, as - described in https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.autoscalingPolicies.create``, the resource - name of the region has the following format: - ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.autoscalingPolicies.create``, the resource - name of the location has the following format: - ``projects/{project_id}/locations/{location}`` - policy (Union[dict, ~google.cloud.dataproc_v1.types.AutoscalingPolicy]): Required. The autoscaling policy to create. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1.types.AutoscalingPolicy` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1.types.AutoscalingPolicy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "create_autoscaling_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "create_autoscaling_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_autoscaling_policy, - default_retry=self._method_configs["CreateAutoscalingPolicy"].retry, - default_timeout=self._method_configs["CreateAutoscalingPolicy"].timeout, - client_info=self._client_info, - ) - - request = autoscaling_policies_pb2.CreateAutoscalingPolicyRequest( - parent=parent, policy=policy, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_autoscaling_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def update_autoscaling_policy( - self, - policy, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates (replaces) autoscaling policy. - - Disabled check for update_mask, because all updates will be full - replacements. 
- - Example: - >>> from google.cloud import dataproc_v1 - >>> - >>> client = dataproc_v1.AutoscalingPolicyServiceClient() - >>> - >>> # TODO: Initialize `policy`: - >>> policy = {} - >>> - >>> response = client.update_autoscaling_policy(policy) - - Args: - policy (Union[dict, ~google.cloud.dataproc_v1.types.AutoscalingPolicy]): Required. The updated autoscaling policy. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1.types.AutoscalingPolicy` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1.types.AutoscalingPolicy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "update_autoscaling_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "update_autoscaling_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_autoscaling_policy, - default_retry=self._method_configs["UpdateAutoscalingPolicy"].retry, - default_timeout=self._method_configs["UpdateAutoscalingPolicy"].timeout, - client_info=self._client_info, - ) - - request = autoscaling_policies_pb2.UpdateAutoscalingPolicyRequest( - policy=policy, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("policy.name", policy.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_autoscaling_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_autoscaling_policy( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Retrieves autoscaling policy. - - Example: - >>> from google.cloud import dataproc_v1 - >>> - >>> client = dataproc_v1.AutoscalingPolicyServiceClient() - >>> - >>> # TODO: Initialize `name`: - >>> name = '' - >>> - >>> response = client.get_autoscaling_policy(name) - - Args: - name (str): Required. The "resource name" of the autoscaling policy, as - described in https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.autoscalingPolicies.get``, the resource name - of the policy has the following format: - ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` - - - For ``projects.locations.autoscalingPolicies.get``, the resource name - of the policy has the following format: - ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. 
- timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1.types.AutoscalingPolicy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_autoscaling_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "get_autoscaling_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_autoscaling_policy, - default_retry=self._method_configs["GetAutoscalingPolicy"].retry, - default_timeout=self._method_configs["GetAutoscalingPolicy"].timeout, - client_info=self._client_info, - ) - - request = autoscaling_policies_pb2.GetAutoscalingPolicyRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_autoscaling_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_autoscaling_policies( - self, - parent, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists autoscaling policies in the project. - - Example: - >>> from google.cloud import dataproc_v1 - >>> - >>> client = dataproc_v1.AutoscalingPolicyServiceClient() - >>> - >>> parent = client.region_path('[PROJECT]', '[REGION]') - >>> - >>> # Iterate over all results - >>> for element in client.list_autoscaling_policies(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_autoscaling_policies(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The "resource name" of the region or location, as - described in https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.autoscalingPolicies.list``, the resource name - of the region has the following format: - ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.autoscalingPolicies.list``, the resource - name of the location has the following format: - ``projects/{project_id}/locations/{location}`` - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. 
- metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.dataproc_v1.types.AutoscalingPolicy` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_autoscaling_policies" not in self._inner_api_calls: - self._inner_api_calls[ - "list_autoscaling_policies" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_autoscaling_policies, - default_retry=self._method_configs["ListAutoscalingPolicies"].retry, - default_timeout=self._method_configs["ListAutoscalingPolicies"].timeout, - client_info=self._client_info, - ) - - request = autoscaling_policies_pb2.ListAutoscalingPoliciesRequest( - parent=parent, page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_autoscaling_policies"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="policies", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def delete_autoscaling_policy( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes an autoscaling policy. It is an error to delete an autoscaling - policy that is in use by one or more clusters. - - Example: - >>> from google.cloud import dataproc_v1 - >>> - >>> client = dataproc_v1.AutoscalingPolicyServiceClient() - >>> - >>> # TODO: Initialize `name`: - >>> name = '' - >>> - >>> client.delete_autoscaling_policy(name) - - Args: - name (str): Required. The "resource name" of the autoscaling policy, as - described in https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.autoscalingPolicies.delete``, the resource - name of the policy has the following format: - ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` - - - For ``projects.locations.autoscalingPolicies.delete``, the resource - name of the policy has the following format: - ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. 
- google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "delete_autoscaling_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_autoscaling_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_autoscaling_policy, - default_retry=self._method_configs["DeleteAutoscalingPolicy"].retry, - default_timeout=self._method_configs["DeleteAutoscalingPolicy"].timeout, - client_info=self._client_info, - ) - - request = autoscaling_policies_pb2.DeleteAutoscalingPolicyRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_autoscaling_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) diff --git a/google/cloud/dataproc_v1/gapic/autoscaling_policy_service_client_config.py b/google/cloud/dataproc_v1/gapic/autoscaling_policy_service_client_config.py deleted file mode 100644 index 7066450a..00000000 --- a/google/cloud/dataproc_v1/gapic/autoscaling_policy_service_client_config.py +++ /dev/null @@ -1,139 +0,0 @@ -config = { - "interfaces": { - "google.cloud.dataproc.v1.AutoscalingPolicyService": { - "retry_codes": { - "retry_policy_4_codes": [ - "DEADLINE_EXCEEDED", - "INTERNAL", - "UNAVAILABLE", - ], - "retry_policy_1_codes": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "retry_policy_6_codes": [ - "INTERNAL", - "DEADLINE_EXCEEDED", - "UNAVAILABLE", - ], - "no_retry_codes": [], - "retry_policy_3_codes": ["UNAVAILABLE"], - "retry_policy_2_codes": [ - "DEADLINE_EXCEEDED", - "INTERNAL", - "UNAVAILABLE", - ], - "no_retry_1_codes": [], - "retry_policy_5_codes": ["UNAVAILABLE"], - "retry_policy_7_codes": ["UNAVAILABLE"], - }, - "retry_params": { - "retry_policy_1_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 600000, - "total_timeout_millis": 600000, - }, - "retry_policy_3_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 600000, - "total_timeout_millis": 600000, - }, - "retry_policy_2_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 900000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 900000, - "total_timeout_millis": 900000, - }, - "retry_policy_6_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 300000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 300000, - "total_timeout_millis": 300000, - }, - "retry_policy_7_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 900000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 900000, - "total_timeout_millis": 900000, - }, - "retry_policy_5_params": { - "initial_retry_delay_millis": 100, - 
"retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 300000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 300000, - "total_timeout_millis": 300000, - }, - "retry_policy_4_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 600000, - "total_timeout_millis": 600000, - }, - "no_retry_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 0, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 0, - "total_timeout_millis": 0, - }, - "no_retry_1_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 600000, - "total_timeout_millis": 600000, - }, - }, - "methods": { - "CreateAutoscalingPolicy": { - "timeout_millis": 600000, - "retry_codes_name": "no_retry_1_codes", - "retry_params_name": "no_retry_1_params", - }, - "UpdateAutoscalingPolicy": { - "timeout_millis": 600000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "GetAutoscalingPolicy": { - "timeout_millis": 600000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "ListAutoscalingPolicies": { - "timeout_millis": 600000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "DeleteAutoscalingPolicy": { - "timeout_millis": 600000, - "retry_codes_name": "no_retry_1_codes", - "retry_params_name": "no_retry_1_params", - }, - }, - } - } -} diff --git a/google/cloud/dataproc_v1/gapic/cluster_controller_client.py b/google/cloud/dataproc_v1/gapic/cluster_controller_client.py deleted file mode 100644 index 9b01fa67..00000000 --- a/google/cloud/dataproc_v1/gapic/cluster_controller_client.py +++ /dev/null @@ -1,878 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Accesses the google.cloud.dataproc.v1 ClusterController API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.grpc_helpers -import google.api_core.operation -import google.api_core.operations_v1 -import google.api_core.page_iterator -import grpc - -from google.cloud.dataproc_v1.gapic import cluster_controller_client_config -from google.cloud.dataproc_v1.gapic import enums -from google.cloud.dataproc_v1.gapic.transports import cluster_controller_grpc_transport -from google.cloud.dataproc_v1.proto import autoscaling_policies_pb2 -from google.cloud.dataproc_v1.proto import autoscaling_policies_pb2_grpc -from google.cloud.dataproc_v1.proto import clusters_pb2 -from google.cloud.dataproc_v1.proto import clusters_pb2_grpc -from google.cloud.dataproc_v1.proto import operations_pb2 as proto_operations_pb2 -from google.longrunning import operations_pb2 as longrunning_operations_pb2 -from google.protobuf import duration_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-dataproc", -).version - - -class ClusterControllerClient(object): - """ - The ClusterControllerService provides methods to manage clusters - of Compute Engine instances. - """ - - SERVICE_ADDRESS = "dataproc.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.cloud.dataproc.v1.ClusterController" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ClusterControllerClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.ClusterControllerGrpcTransport, - Callable[[~.Credentials, type], ~.ClusterControllerGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. 
- This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. - if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = cluster_controller_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=cluster_controller_grpc_transport.ClusterControllerGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = cluster_controller_grpc_transport.ClusterControllerGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials, - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME], - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def create_cluster( - self, - project_id, - region, - cluster, - request_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a cluster in a project. The returned ``Operation.metadata`` - will be - `ClusterOperationMetadata `__. 
- - Example: - >>> from google.cloud import dataproc_v1 - >>> - >>> client = dataproc_v1.ClusterControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # TODO: Initialize `cluster`: - >>> cluster = {} - >>> - >>> response = client.create_cluster(project_id, region, cluster) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - project_id (str): Required. The ID of the Google Cloud Platform project that the cluster - belongs to. - region (str): Required. The Dataproc region in which to handle the request. - cluster (Union[dict, ~google.cloud.dataproc_v1.types.Cluster]): Required. The cluster to create. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1.types.Cluster` - request_id (str): Optional. A unique id used to identify the request. If the server - receives two ``CreateClusterRequest`` requests with the same id, then - the second request will be ignored and the first - ``google.longrunning.Operation`` created and stored in the backend is - returned. - - It is recommended to always set this value to a - `UUID `__. - - The id must contain only letters (a-z, A-Z), numbers (0-9), underscores - (_), and hyphens (-). The maximum length is 40 characters. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "create_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "create_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_cluster, - default_retry=self._method_configs["CreateCluster"].retry, - default_timeout=self._method_configs["CreateCluster"].timeout, - client_info=self._client_info, - ) - - request = clusters_pb2.CreateClusterRequest( - project_id=project_id, - region=region, - cluster=cluster, - request_id=request_id, - ) - operation = self._inner_api_calls["create_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - clusters_pb2.Cluster, - metadata_type=proto_operations_pb2.ClusterOperationMetadata, - ) - - def update_cluster( - self, - project_id, - region, - cluster_name, - cluster, - update_mask, - graceful_decommission_timeout=None, - request_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates a cluster in a project. 
The returned ``Operation.metadata`` - will be - `ClusterOperationMetadata `__. - - Example: - >>> from google.cloud import dataproc_v1 - >>> - >>> client = dataproc_v1.ClusterControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # TODO: Initialize `cluster_name`: - >>> cluster_name = '' - >>> - >>> # TODO: Initialize `cluster`: - >>> cluster = {} - >>> - >>> # TODO: Initialize `update_mask`: - >>> update_mask = {} - >>> - >>> response = client.update_cluster(project_id, region, cluster_name, cluster, update_mask) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - project_id (str): Required. The ID of the Google Cloud Platform project the - cluster belongs to. - region (str): Required. The Dataproc region in which to handle the request. - cluster_name (str): Required. The cluster name. - cluster (Union[dict, ~google.cloud.dataproc_v1.types.Cluster]): Required. The changes to the cluster. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1.types.Cluster` - update_mask (Union[dict, ~google.cloud.dataproc_v1.types.FieldMask]): Required. Specifies the path, relative to ``Cluster``, of the field - to update. For example, to change the number of workers in a cluster to - 5, the ``update_mask`` parameter would be specified as - ``config.worker_config.num_instances``, and the ``PATCH`` request body - would specify the new value, as follows: - - :: - - { - "config":{ - "workerConfig":{ - "numInstances":"5" - } - } - } - - Similarly, to change the number of preemptible workers in a cluster to - 5, the ``update_mask`` parameter would be - ``config.secondary_worker_config.num_instances``, and the ``PATCH`` - request body would be set as follows: - - :: - - { - "config":{ - "secondaryWorkerConfig":{ - "numInstances":"5" - } - } - } - - Note: Currently, only the following fields can be updated: - - .. raw:: html - - - - - - - - - - - - - - - - - - - - - - - -
Mask                                           Purpose
labels                                         Update labels
config.worker_config.num_instances             Resize primary worker group
config.secondary_worker_config.num_instances   Resize secondary worker group
config.autoscaling_config.policy_uri           Use, stop using, or change autoscaling policies
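As a minimal sketch of the mask-plus-body pattern described above, using the pre-migration ``ClusterControllerClient`` shown in this deleted file (the project, region, and cluster names are placeholders), resizing the primary worker group to 5 instances looks roughly like:

    from google.cloud import dataproc_v1

    client = dataproc_v1.ClusterControllerClient()

    # Only the masked path is applied; other fields of `cluster` are ignored.
    cluster = {"config": {"worker_config": {"num_instances": 5}}}
    update_mask = {"paths": ["config.worker_config.num_instances"]}

    operation = client.update_cluster(
        "my-project",    # placeholder project_id
        "us-central1",   # placeholder region
        "my-cluster",    # placeholder cluster_name
        cluster,
        update_mask,
    )
    updated_cluster = operation.result()  # wait for the long-running update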
- - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1.types.FieldMask` - graceful_decommission_timeout (Union[dict, ~google.cloud.dataproc_v1.types.Duration]): Optional. Timeout for graceful YARN decomissioning. Graceful - decommissioning allows removing nodes from the cluster without - interrupting jobs in progress. Timeout specifies how long to wait for - jobs in progress to finish before forcefully removing nodes (and - potentially interrupting jobs). Default timeout is 0 (for forceful - decommission), and the maximum allowed timeout is 1 day. (see JSON - representation of - `Duration `__). - - Only supported on Dataproc image versions 1.2 and higher. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1.types.Duration` - request_id (str): Optional. A unique id used to identify the request. If the server - receives two ``UpdateClusterRequest`` requests with the same id, then - the second request will be ignored and the first - ``google.longrunning.Operation`` created and stored in the backend is - returned. - - It is recommended to always set this value to a - `UUID `__. - - The id must contain only letters (a-z, A-Z), numbers (0-9), underscores - (_), and hyphens (-). The maximum length is 40 characters. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "update_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "update_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_cluster, - default_retry=self._method_configs["UpdateCluster"].retry, - default_timeout=self._method_configs["UpdateCluster"].timeout, - client_info=self._client_info, - ) - - request = clusters_pb2.UpdateClusterRequest( - project_id=project_id, - region=region, - cluster_name=cluster_name, - cluster=cluster, - update_mask=update_mask, - graceful_decommission_timeout=graceful_decommission_timeout, - request_id=request_id, - ) - operation = self._inner_api_calls["update_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - clusters_pb2.Cluster, - metadata_type=proto_operations_pb2.ClusterOperationMetadata, - ) - - def delete_cluster( - self, - project_id, - region, - cluster_name, - cluster_uuid=None, - request_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes a cluster in a project. The returned ``Operation.metadata`` - will be - `ClusterOperationMetadata `__. 
- - Example: - >>> from google.cloud import dataproc_v1 - >>> - >>> client = dataproc_v1.ClusterControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # TODO: Initialize `cluster_name`: - >>> cluster_name = '' - >>> - >>> response = client.delete_cluster(project_id, region, cluster_name) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - project_id (str): Required. The ID of the Google Cloud Platform project that the cluster - belongs to. - region (str): Required. The Dataproc region in which to handle the request. - cluster_name (str): Required. The cluster name. - cluster_uuid (str): Optional. Specifying the ``cluster_uuid`` means the RPC should fail - (with error NOT_FOUND) if cluster with specified UUID does not exist. - request_id (str): Optional. A unique id used to identify the request. If the server - receives two ``DeleteClusterRequest`` requests with the same id, then - the second request will be ignored and the first - ``google.longrunning.Operation`` created and stored in the backend is - returned. - - It is recommended to always set this value to a - `UUID `__. - - The id must contain only letters (a-z, A-Z), numbers (0-9), underscores - (_), and hyphens (-). The maximum length is 40 characters. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "delete_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_cluster, - default_retry=self._method_configs["DeleteCluster"].retry, - default_timeout=self._method_configs["DeleteCluster"].timeout, - client_info=self._client_info, - ) - - request = clusters_pb2.DeleteClusterRequest( - project_id=project_id, - region=region, - cluster_name=cluster_name, - cluster_uuid=cluster_uuid, - request_id=request_id, - ) - operation = self._inner_api_calls["delete_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.ClusterOperationMetadata, - ) - - def diagnose_cluster( - self, - project_id, - region, - cluster_name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets cluster diagnostic information. The returned - ``Operation.metadata`` will be - `ClusterOperationMetadata `__. 
- After the operation completes, ``Operation.response`` contains - `DiagnoseClusterResults `__. - - Example: - >>> from google.cloud import dataproc_v1 - >>> - >>> client = dataproc_v1.ClusterControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # TODO: Initialize `cluster_name`: - >>> cluster_name = '' - >>> - >>> response = client.diagnose_cluster(project_id, region, cluster_name) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - project_id (str): Required. The ID of the Google Cloud Platform project that the cluster - belongs to. - region (str): Required. The Dataproc region in which to handle the request. - cluster_name (str): Required. The cluster name. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "diagnose_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "diagnose_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.diagnose_cluster, - default_retry=self._method_configs["DiagnoseCluster"].retry, - default_timeout=self._method_configs["DiagnoseCluster"].timeout, - client_info=self._client_info, - ) - - request = clusters_pb2.DiagnoseClusterRequest( - project_id=project_id, region=region, cluster_name=cluster_name, - ) - operation = self._inner_api_calls["diagnose_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - clusters_pb2.DiagnoseClusterResults, - metadata_type=proto_operations_pb2.ClusterOperationMetadata, - ) - - def get_cluster( - self, - project_id, - region, - cluster_name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets the resource representation for a cluster in a project. - - Example: - >>> from google.cloud import dataproc_v1 - >>> - >>> client = dataproc_v1.ClusterControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # TODO: Initialize `cluster_name`: - >>> cluster_name = '' - >>> - >>> response = client.get_cluster(project_id, region, cluster_name) - - Args: - project_id (str): Required. The ID of the Google Cloud Platform project that the cluster - belongs to. - region (str): Required. The Dataproc region in which to handle the request. - cluster_name (str): Required. The cluster name. 
- retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1.types.Cluster` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "get_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_cluster, - default_retry=self._method_configs["GetCluster"].retry, - default_timeout=self._method_configs["GetCluster"].timeout, - client_info=self._client_info, - ) - - request = clusters_pb2.GetClusterRequest( - project_id=project_id, region=region, cluster_name=cluster_name, - ) - return self._inner_api_calls["get_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_clusters( - self, - project_id, - region, - filter_=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists all regions/{region}/clusters in a project alphabetically. - - Example: - >>> from google.cloud import dataproc_v1 - >>> - >>> client = dataproc_v1.ClusterControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # Iterate over all results - >>> for element in client.list_clusters(project_id, region): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_clusters(project_id, region).pages: - ... for element in page: - ... # process element - ... pass - - Args: - project_id (str): Required. The ID of the Google Cloud Platform project that the cluster - belongs to. - region (str): Required. The Dataproc region in which to handle the request. - filter_ (str): Optional. A filter constraining the clusters to list. Filters are - case-sensitive and have the following syntax: - - field = value [AND [field = value]] ... - - where **field** is one of ``status.state``, ``clusterName``, or - ``labels.[KEY]``, and ``[KEY]`` is a label key. **value** can be ``*`` - to match all values. ``status.state`` can be one of the following: - ``ACTIVE``, ``INACTIVE``, ``CREATING``, ``RUNNING``, ``ERROR``, - ``DELETING``, or ``UPDATING``. ``ACTIVE`` contains the ``CREATING``, - ``UPDATING``, and ``RUNNING`` states. ``INACTIVE`` contains the - ``DELETING`` and ``ERROR`` states. ``clusterName`` is the name of the - cluster provided at creation time. Only the logical ``AND`` operator is - supported; space-separated items are treated as having an implicit - ``AND`` operator. - - Example filter: - - status.state = ACTIVE AND clusterName = mycluster AND labels.env = - staging AND labels.starred = \* - page_size (int): The maximum number of resources contained in the - underlying API response. 
If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.dataproc_v1.types.Cluster` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_clusters" not in self._inner_api_calls: - self._inner_api_calls[ - "list_clusters" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_clusters, - default_retry=self._method_configs["ListClusters"].retry, - default_timeout=self._method_configs["ListClusters"].timeout, - client_info=self._client_info, - ) - - request = clusters_pb2.ListClustersRequest( - project_id=project_id, region=region, filter=filter_, page_size=page_size, - ) - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_clusters"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="clusters", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator diff --git a/google/cloud/dataproc_v1/gapic/cluster_controller_client_config.py b/google/cloud/dataproc_v1/gapic/cluster_controller_client_config.py deleted file mode 100644 index 51479bb1..00000000 --- a/google/cloud/dataproc_v1/gapic/cluster_controller_client_config.py +++ /dev/null @@ -1,76 +0,0 @@ -config = { - "interfaces": { - "google.cloud.dataproc.v1.ClusterController": { - "retry_codes": { - "retry_policy_6_codes": [ - "INTERNAL", - "DEADLINE_EXCEEDED", - "UNAVAILABLE", - ], - "no_retry_codes": [], - "retry_policy_5_codes": ["UNAVAILABLE"], - }, - "retry_params": { - "retry_policy_6_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 300000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 300000, - "total_timeout_millis": 300000, - }, - "retry_policy_5_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 300000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 300000, - "total_timeout_millis": 300000, - }, - "no_retry_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 0, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 0, - "total_timeout_millis": 0, - }, - }, - "methods": { - "CreateCluster": { - "timeout_millis": 300000, - "retry_codes_name": 
"retry_policy_5_codes", - "retry_params_name": "retry_policy_5_params", - }, - "UpdateCluster": { - "timeout_millis": 300000, - "retry_codes_name": "retry_policy_5_codes", - "retry_params_name": "retry_policy_5_params", - }, - "DeleteCluster": { - "timeout_millis": 300000, - "retry_codes_name": "retry_policy_5_codes", - "retry_params_name": "retry_policy_5_params", - }, - "DiagnoseCluster": { - "timeout_millis": 300000, - "retry_codes_name": "retry_policy_5_codes", - "retry_params_name": "retry_policy_5_params", - }, - "GetCluster": { - "timeout_millis": 300000, - "retry_codes_name": "retry_policy_6_codes", - "retry_params_name": "retry_policy_6_params", - }, - "ListClusters": { - "timeout_millis": 300000, - "retry_codes_name": "retry_policy_6_codes", - "retry_params_name": "retry_policy_6_params", - }, - }, - } - } -} diff --git a/google/cloud/dataproc_v1/gapic/enums.py b/google/cloud/dataproc_v1/gapic/enums.py deleted file mode 100644 index ae288630..00000000 --- a/google/cloud/dataproc_v1/gapic/enums.py +++ /dev/null @@ -1,324 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Wrappers for protocol buffer enum types.""" - -import enum - - -class Component(enum.IntEnum): - """ - Cluster components that can be activated. - - Attributes: - COMPONENT_UNSPECIFIED (int): Unspecified component. Specifying this will cause Cluster creation to fail. - ANACONDA (int): The Anaconda python distribution. - HIVE_WEBHCAT (int): The Hive Web HCatalog (the REST service for accessing HCatalog). - JUPYTER (int): The Jupyter Notebook. - PRESTO (int): The Presto query engine. - ZEPPELIN (int): The Zeppelin notebook. - ZOOKEEPER (int): The Zookeeper service. - """ - - COMPONENT_UNSPECIFIED = 0 - ANACONDA = 5 - HIVE_WEBHCAT = 3 - JUPYTER = 1 - PRESTO = 6 - ZEPPELIN = 4 - ZOOKEEPER = 8 - - -class ClusterOperationStatus(object): - class State(enum.IntEnum): - """ - The operation state. - - Attributes: - UNKNOWN (int): Unused. - PENDING (int): The operation has been created. - RUNNING (int): The operation is running. - DONE (int): The operation is done; either cancelled or completed. - """ - - UNKNOWN = 0 - PENDING = 1 - RUNNING = 2 - DONE = 3 - - -class ClusterStatus(object): - class State(enum.IntEnum): - """ - The cluster state. - - Attributes: - UNKNOWN (int): The cluster state is unknown. - CREATING (int): The cluster is being created and set up. It is not ready for use. - RUNNING (int): The cluster is currently running and healthy. It is ready for use. - ERROR (int): The cluster encountered an error. It is not ready for use. - DELETING (int): The cluster is being deleted. It cannot be used. - UPDATING (int): The cluster is being updated. It continues to accept and process jobs. - """ - - UNKNOWN = 0 - CREATING = 1 - RUNNING = 2 - ERROR = 3 - DELETING = 4 - UPDATING = 5 - - class Substate(enum.IntEnum): - """ - The cluster substate. - - Attributes: - UNSPECIFIED (int): The cluster substate is unknown. 
- UNHEALTHY (int): The cluster is known to be in an unhealthy state - (for example, critical daemons are not running or HDFS capacity is - exhausted). - - Applies to RUNNING state. - STALE_STATUS (int): The agent-reported status is out of date (may occur if - Dataproc loses communication with Agent). - - Applies to RUNNING state. - """ - - UNSPECIFIED = 0 - UNHEALTHY = 1 - STALE_STATUS = 2 - - -class InstanceGroupConfig(object): - class Preemptibility(enum.IntEnum): - """ - Controls the use of [preemptible instances] - (https://cloud.google.com/compute/docs/instances/preemptible) within the - group. - - Attributes: - PREEMPTIBILITY_UNSPECIFIED (int): Preemptibility is unspecified, the system will choose the - appropriate setting for each instance group. - NON_PREEMPTIBLE (int): Instances are non-preemptible. - - This option is allowed for all instance groups and is the only valid - value for Master and Worker instance groups. - PREEMPTIBLE (int): Instances are preemptible. - - This option is allowed only for secondary worker groups. - """ - - PREEMPTIBILITY_UNSPECIFIED = 0 - NON_PREEMPTIBLE = 1 - PREEMPTIBLE = 2 - - -class JobStatus(object): - class State(enum.IntEnum): - """ - The job state. - - Attributes: - STATE_UNSPECIFIED (int): The job state is unknown. - PENDING (int): The job is pending; it has been submitted, but is not yet running. - SETUP_DONE (int): Job has been received by the service and completed initial setup; - it will soon be submitted to the cluster. - RUNNING (int): The job is running on the cluster. - CANCEL_PENDING (int): A CancelJob request has been received, but is pending. - CANCEL_STARTED (int): Transient in-flight resources have been canceled, and the request to - cancel the running job has been issued to the cluster. - CANCELLED (int): The job cancellation was successful. - DONE (int): The job has completed successfully. - ERROR (int): The job has completed, but encountered an error. - ATTEMPT_FAILURE (int): Job attempt has failed. The detail field contains failure details for - this attempt. - - Applies to restartable jobs only. - """ - - STATE_UNSPECIFIED = 0 - PENDING = 1 - SETUP_DONE = 8 - RUNNING = 2 - CANCEL_PENDING = 3 - CANCEL_STARTED = 7 - CANCELLED = 4 - DONE = 5 - ERROR = 6 - ATTEMPT_FAILURE = 9 - - class Substate(enum.IntEnum): - """ - The job substate. - - Attributes: - UNSPECIFIED (int): The job substate is unknown. - SUBMITTED (int): The Job is submitted to the agent. - - Applies to RUNNING state. - QUEUED (int): The Job has been received and is awaiting execution (it may be waiting - for a condition to be met). See the "details" field for the reason for - the delay. - - Applies to RUNNING state. - STALE_STATUS (int): The agent-reported status is out of date, which may be caused by a - loss of communication between the agent and Dataproc. If the - agent does not send a timely update, the job will fail. - - Applies to RUNNING state. - """ - - UNSPECIFIED = 0 - SUBMITTED = 1 - QUEUED = 2 - STALE_STATUS = 3 - - -class ListJobsRequest(object): - class JobStateMatcher(enum.IntEnum): - """ - A matcher that specifies categories of job states. - - Attributes: - ALL (int): Match all jobs, regardless of state. - ACTIVE (int): Only match jobs in non-terminal states: PENDING, RUNNING, or - CANCEL_PENDING. - NON_ACTIVE (int): Only match jobs in terminal states: CANCELLED, DONE, or ERROR. - """ - - ALL = 0 - ACTIVE = 1 - NON_ACTIVE = 2 - - -class LoggingConfig(object): - class Level(enum.IntEnum): - """ - The Log4j level for job execution. 
When running an `Apache - Hive `__ job, Cloud Dataproc configures the - Hive client to an equivalent verbosity level. - - Attributes: - LEVEL_UNSPECIFIED (int): Level is unspecified. Use default level for log4j. - ALL (int): Use ALL level for log4j. - TRACE (int): Use TRACE level for log4j. - DEBUG (int): Use DEBUG level for log4j. - INFO (int): Use INFO level for log4j. - WARN (int): Use WARN level for log4j. - ERROR (int): Use ERROR level for log4j. - FATAL (int): Use FATAL level for log4j. - OFF (int): Turn off log4j. - """ - - LEVEL_UNSPECIFIED = 0 - ALL = 1 - TRACE = 2 - DEBUG = 3 - INFO = 4 - WARN = 5 - ERROR = 6 - FATAL = 7 - OFF = 8 - - -class ReservationAffinity(object): - class Type(enum.IntEnum): - """ - Indicates whether to consume capacity from an reservation or not. - - Attributes: - TYPE_UNSPECIFIED (int) - NO_RESERVATION (int): Do not consume from any allocated capacity. - ANY_RESERVATION (int): Consume any reservation available. - SPECIFIC_RESERVATION (int): Must consume from a specific reservation. Must specify key value fields - for specifying the reservations. - """ - - TYPE_UNSPECIFIED = 0 - NO_RESERVATION = 1 - ANY_RESERVATION = 2 - SPECIFIC_RESERVATION = 3 - - -class WorkflowMetadata(object): - class State(enum.IntEnum): - """ - The operation state. - - Attributes: - UNKNOWN (int): Unused. - PENDING (int): The operation has been created. - RUNNING (int): The operation is running. - DONE (int): The operation is done; either cancelled or completed. - """ - - UNKNOWN = 0 - PENDING = 1 - RUNNING = 2 - DONE = 3 - - -class WorkflowNode(object): - class NodeState(enum.IntEnum): - """ - The workflow node state. - - Attributes: - NODE_STATE_UNSPECIFIED (int): State is unspecified. - BLOCKED (int): The node is awaiting prerequisite node to finish. - RUNNABLE (int): The node is runnable but not running. - RUNNING (int): The node is running. - COMPLETED (int): The node completed successfully. - FAILED (int): The node failed. A node can be marked FAILED because - its ancestor or peer failed. - """ - - NODE_STATE_UNSPECIFIED = 0 - BLOCKED = 1 - RUNNABLE = 2 - RUNNING = 3 - COMPLETED = 4 - FAILED = 5 - - -class YarnApplication(object): - class State(enum.IntEnum): - """ - The application state, corresponding to - YarnProtos.YarnApplicationStateProto. - - Attributes: - STATE_UNSPECIFIED (int): Status is unspecified. - NEW (int): Status is NEW. - NEW_SAVING (int): Status is NEW_SAVING. - SUBMITTED (int): Status is SUBMITTED. - ACCEPTED (int): Status is ACCEPTED. - RUNNING (int): Status is RUNNING. - FINISHED (int): Status is FINISHED. - FAILED (int): Status is FAILED. - KILLED (int): Status is KILLED. - """ - - STATE_UNSPECIFIED = 0 - NEW = 1 - NEW_SAVING = 2 - SUBMITTED = 3 - ACCEPTED = 4 - RUNNING = 5 - FINISHED = 6 - FAILED = 7 - KILLED = 8 diff --git a/google/cloud/dataproc_v1/gapic/job_controller_client.py b/google/cloud/dataproc_v1/gapic/job_controller_client.py deleted file mode 100644 index 25f12dfa..00000000 --- a/google/cloud/dataproc_v1/gapic/job_controller_client.py +++ /dev/null @@ -1,810 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
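The deleted enums module exposed these wrappers as plain IntEnum classes, so configuration and status checks could use names instead of raw integers; a small sketch using the Component values above (the cluster config fragment follows the old Cluster proto layout and is illustrative only):

    from google.cloud.dataproc_v1.gapic import enums  # old, pre-microgen path

    # Select optional components by name rather than by magic number; the
    # values end up in ClusterConfig.software_config.optional_components.
    cluster_config = {
        "software_config": {
            "optional_components": [
                enums.Component.ANACONDA,
                enums.Component.JUPYTER,
            ],
        },
    }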
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Accesses the google.cloud.dataproc.v1 JobController API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.grpc_helpers -import google.api_core.operation -import google.api_core.operations_v1 -import google.api_core.page_iterator -import grpc - -from google.cloud.dataproc_v1.gapic import enums -from google.cloud.dataproc_v1.gapic import job_controller_client_config -from google.cloud.dataproc_v1.gapic.transports import job_controller_grpc_transport -from google.cloud.dataproc_v1.proto import autoscaling_policies_pb2 -from google.cloud.dataproc_v1.proto import autoscaling_policies_pb2_grpc -from google.cloud.dataproc_v1.proto import clusters_pb2 -from google.cloud.dataproc_v1.proto import clusters_pb2_grpc -from google.cloud.dataproc_v1.proto import jobs_pb2 -from google.cloud.dataproc_v1.proto import jobs_pb2_grpc -from google.cloud.dataproc_v1.proto import operations_pb2 as proto_operations_pb2 -from google.longrunning import operations_pb2 as longrunning_operations_pb2 -from google.protobuf import duration_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-dataproc", -).version - - -class JobControllerClient(object): - """The JobController provides methods to manage jobs.""" - - SERVICE_ADDRESS = "dataproc.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.cloud.dataproc.v1.JobController" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - JobControllerClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.JobControllerGrpcTransport, - Callable[[~.Credentials, type], ~.JobControllerGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. 
- channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. - if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = job_controller_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=job_controller_grpc_transport.JobControllerGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = job_controller_grpc_transport.JobControllerGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials, - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME], - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. 
- self._inner_api_calls = {} - - # Service calls - def submit_job( - self, - project_id, - region, - job, - request_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Submits a job to a cluster. - - Example: - >>> from google.cloud import dataproc_v1 - >>> - >>> client = dataproc_v1.JobControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # TODO: Initialize `job`: - >>> job = {} - >>> - >>> response = client.submit_job(project_id, region, job) - - Args: - project_id (str): Required. The ID of the Google Cloud Platform project that the job - belongs to. - region (str): Required. The Dataproc region in which to handle the request. - job (Union[dict, ~google.cloud.dataproc_v1.types.Job]): Required. The job resource. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1.types.Job` - request_id (str): Optional. A unique id used to identify the request. If the server - receives two ``SubmitJobRequest`` requests with the same id, then the - second request will be ignored and the first ``Job`` created and stored - in the backend is returned. - - It is recommended to always set this value to a - `UUID `__. - - The id must contain only letters (a-z, A-Z), numbers (0-9), underscores - (_), and hyphens (-). The maximum length is 40 characters. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1.types.Job` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "submit_job" not in self._inner_api_calls: - self._inner_api_calls[ - "submit_job" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.submit_job, - default_retry=self._method_configs["SubmitJob"].retry, - default_timeout=self._method_configs["SubmitJob"].timeout, - client_info=self._client_info, - ) - - request = jobs_pb2.SubmitJobRequest( - project_id=project_id, region=region, job=job, request_id=request_id, - ) - return self._inner_api_calls["submit_job"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def submit_job_as_operation( - self, - project_id, - region, - job, - request_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Submits job to a cluster. 
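Taken together with the constructor documented above, the deleted submit_job wrapper was used roughly as follows (project, region, bucket, and cluster names are placeholders):

    import uuid

    from google.cloud import dataproc_v1

    # Old (pre-microgen) client; credentials are resolved from the environment.
    client = dataproc_v1.JobControllerClient()

    job = {
        "placement": {"cluster_name": "my-cluster"},
        "pyspark_job": {"main_python_file_uri": "gs://my-bucket/word_count.py"},
    }

    # request_id makes the call idempotent: a retried request with the same id
    # returns the Job created by the first attempt instead of a duplicate.
    response = client.submit_job(
        "my-project", "us-central1", job, request_id=str(uuid.uuid4()),
    )
    print("submitted", response.reference.job_id)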
- - Example: - >>> from google.cloud import dataproc_v1 - >>> - >>> client = dataproc_v1.JobControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # TODO: Initialize `job`: - >>> job = {} - >>> - >>> response = client.submit_job_as_operation(project_id, region, job) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - project_id (str): Required. The ID of the Google Cloud Platform project that the job - belongs to. - region (str): Required. The Dataproc region in which to handle the request. - job (Union[dict, ~google.cloud.dataproc_v1.types.Job]): Required. The job resource. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1.types.Job` - request_id (str): Optional. A unique id used to identify the request. If the server - receives two ``SubmitJobRequest`` requests with the same id, then the - second request will be ignored and the first ``Job`` created and stored - in the backend is returned. - - It is recommended to always set this value to a - `UUID `__. - - The id must contain only letters (a-z, A-Z), numbers (0-9), underscores - (_), and hyphens (-). The maximum length is 40 characters. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "submit_job_as_operation" not in self._inner_api_calls: - self._inner_api_calls[ - "submit_job_as_operation" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.submit_job_as_operation, - default_retry=self._method_configs["SubmitJobAsOperation"].retry, - default_timeout=self._method_configs["SubmitJobAsOperation"].timeout, - client_info=self._client_info, - ) - - request = jobs_pb2.SubmitJobRequest( - project_id=project_id, region=region, job=job, request_id=request_id, - ) - operation = self._inner_api_calls["submit_job_as_operation"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - jobs_pb2.Job, - metadata_type=jobs_pb2.JobMetadata, - ) - - def get_job( - self, - project_id, - region, - job_id, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets the resource representation for a job in a project. 
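Unlike submit_job, the _as_operation variant above wraps the response in a google.api_core.operation.Operation future; a sketch of blocking on it (same placeholder job as the previous example):

    from google.cloud import dataproc_v1

    client = dataproc_v1.JobControllerClient()
    job = {
        "placement": {"cluster_name": "my-cluster"},
        "pyspark_job": {"main_python_file_uri": "gs://my-bucket/word_count.py"},
    }

    operation = client.submit_job_as_operation("my-project", "us-central1", job)

    # operation.metadata carries JobMetadata while the job runs; result()
    # blocks until the long-running operation resolves to the final Job.
    final_job = operation.result(timeout=600)
    print("finished with state", final_job.status.state)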
- - Example: - >>> from google.cloud import dataproc_v1 - >>> - >>> client = dataproc_v1.JobControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # TODO: Initialize `job_id`: - >>> job_id = '' - >>> - >>> response = client.get_job(project_id, region, job_id) - - Args: - project_id (str): Required. The ID of the Google Cloud Platform project that the job - belongs to. - region (str): Required. The Dataproc region in which to handle the request. - job_id (str): Required. The job ID. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1.types.Job` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_job" not in self._inner_api_calls: - self._inner_api_calls[ - "get_job" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_job, - default_retry=self._method_configs["GetJob"].retry, - default_timeout=self._method_configs["GetJob"].timeout, - client_info=self._client_info, - ) - - request = jobs_pb2.GetJobRequest( - project_id=project_id, region=region, job_id=job_id, - ) - return self._inner_api_calls["get_job"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_jobs( - self, - project_id, - region, - page_size=None, - cluster_name=None, - job_state_matcher=None, - filter_=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists regions/{region}/jobs in a project. - - Example: - >>> from google.cloud import dataproc_v1 - >>> - >>> client = dataproc_v1.JobControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # Iterate over all results - >>> for element in client.list_jobs(project_id, region): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_jobs(project_id, region).pages: - ... for element in page: - ... # process element - ... pass - - Args: - project_id (str): Required. The ID of the Google Cloud Platform project that the job - belongs to. - region (str): Required. The Dataproc region in which to handle the request. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - cluster_name (str): Optional. If set, the returned jobs list includes only jobs that were - submitted to the named cluster. - job_state_matcher (~google.cloud.dataproc_v1.types.JobStateMatcher): Optional. 
Specifies enumerated categories of jobs to list. (default - = match ALL jobs). - - If ``filter`` is provided, ``jobStateMatcher`` will be ignored. - filter_ (str): Optional. A filter constraining the jobs to list. Filters are - case-sensitive and have the following syntax: - - [field = value] AND [field [= value]] ... - - where **field** is ``status.state`` or ``labels.[KEY]``, and ``[KEY]`` - is a label key. **value** can be ``*`` to match all values. - ``status.state`` can be either ``ACTIVE`` or ``NON_ACTIVE``. Only the - logical ``AND`` operator is supported; space-separated items are treated - as having an implicit ``AND`` operator. - - Example filter: - - status.state = ACTIVE AND labels.env = staging AND labels.starred = \* - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.dataproc_v1.types.Job` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_jobs" not in self._inner_api_calls: - self._inner_api_calls[ - "list_jobs" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_jobs, - default_retry=self._method_configs["ListJobs"].retry, - default_timeout=self._method_configs["ListJobs"].timeout, - client_info=self._client_info, - ) - - request = jobs_pb2.ListJobsRequest( - project_id=project_id, - region=region, - page_size=page_size, - cluster_name=cluster_name, - job_state_matcher=job_state_matcher, - filter=filter_, - ) - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_jobs"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="jobs", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def update_job( - self, - project_id, - region, - job_id, - job, - update_mask, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates a job in a project. - - Example: - >>> from google.cloud import dataproc_v1 - >>> - >>> client = dataproc_v1.JobControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # TODO: Initialize `job_id`: - >>> job_id = '' - >>> - >>> # TODO: Initialize `job`: - >>> job = {} - >>> - >>> # TODO: Initialize `update_mask`: - >>> update_mask = {} - >>> - >>> response = client.update_job(project_id, region, job_id, job, update_mask) - - Args: - project_id (str): Required. The ID of the Google Cloud Platform project that the job - belongs to. - region (str): Required. 
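A short sketch of the filter syntax and iteration behaviour documented above (label keys and values are placeholders):

    from google.cloud import dataproc_v1

    client = dataproc_v1.JobControllerClient()

    # Only logical AND is supported; `*` matches any value for a label key.
    job_filter = (
        "status.state = ACTIVE AND labels.env = staging AND labels.starred = *"
    )

    # The iterator transparently follows next_page_token across pages.
    for job in client.list_jobs("my-project", "us-central1", filter_=job_filter):
        print(job.reference.job_id, job.status.state)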
The Dataproc region in which to handle the request. - job_id (str): Required. The job ID. - job (Union[dict, ~google.cloud.dataproc_v1.types.Job]): Required. The changes to the job. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1.types.Job` - update_mask (Union[dict, ~google.cloud.dataproc_v1.types.FieldMask]): Required. Specifies the path, relative to Job, of the field to - update. For example, to update the labels of a Job the update_mask - parameter would be specified as labels, and the ``PATCH`` request body - would specify the new value. Note: Currently, labels is the only field - that can be updated. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1.types.Job` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "update_job" not in self._inner_api_calls: - self._inner_api_calls[ - "update_job" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_job, - default_retry=self._method_configs["UpdateJob"].retry, - default_timeout=self._method_configs["UpdateJob"].timeout, - client_info=self._client_info, - ) - - request = jobs_pb2.UpdateJobRequest( - project_id=project_id, - region=region, - job_id=job_id, - job=job, - update_mask=update_mask, - ) - return self._inner_api_calls["update_job"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def cancel_job( - self, - project_id, - region, - job_id, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Starts a job cancellation request. To access the job resource after - cancellation, call - `regions/{region}/jobs.list `__ - or - `regions/{region}/jobs.get `__. - - Example: - >>> from google.cloud import dataproc_v1 - >>> - >>> client = dataproc_v1.JobControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # TODO: Initialize `job_id`: - >>> job_id = '' - >>> - >>> response = client.cancel_job(project_id, region, job_id) - - Args: - project_id (str): Required. The ID of the Google Cloud Platform project that the job - belongs to. - region (str): Required. The Dataproc region in which to handle the request. - job_id (str): Required. The job ID. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. 
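Because labels is currently the only mutable field, an update through the deleted wrapper always looks roughly like this (ids and label values are placeholders):

    from google.cloud import dataproc_v1

    client = dataproc_v1.JobControllerClient()

    updated = client.update_job(
        "my-project",
        "us-central1",
        "my-job-id",
        job={"labels": {"team": "data-eng"}},
        update_mask={"paths": ["labels"]},  # FieldMask supplied as a dict
    )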
Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1.types.Job` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "cancel_job" not in self._inner_api_calls: - self._inner_api_calls[ - "cancel_job" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.cancel_job, - default_retry=self._method_configs["CancelJob"].retry, - default_timeout=self._method_configs["CancelJob"].timeout, - client_info=self._client_info, - ) - - request = jobs_pb2.CancelJobRequest( - project_id=project_id, region=region, job_id=job_id, - ) - return self._inner_api_calls["cancel_job"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def delete_job( - self, - project_id, - region, - job_id, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes the job from the project. If the job is active, the delete - fails, and the response returns ``FAILED_PRECONDITION``. - - Example: - >>> from google.cloud import dataproc_v1 - >>> - >>> client = dataproc_v1.JobControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # TODO: Initialize `job_id`: - >>> job_id = '' - >>> - >>> client.delete_job(project_id, region, job_id) - - Args: - project_id (str): Required. The ID of the Google Cloud Platform project that the job - belongs to. - region (str): Required. The Dataproc region in which to handle the request. - job_id (str): Required. The job ID. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
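Cancellation is asynchronous on the server side, which is why the docstring above points callers back at jobs.get; a polling sketch (the job may equally settle in DONE or ERROR if it finished before the cancellation landed):

    import time

    from google.cloud import dataproc_v1
    from google.cloud.dataproc_v1.gapic import enums

    client = dataproc_v1.JobControllerClient()
    client.cancel_job("my-project", "us-central1", "my-job-id")

    terminal = {
        enums.JobStatus.State.CANCELLED,
        enums.JobStatus.State.DONE,
        enums.JobStatus.State.ERROR,
    }
    # Re-read the job until it reaches a terminal state.
    while True:
        job = client.get_job("my-project", "us-central1", "my-job-id")
        if job.status.state in terminal:
            break
        time.sleep(5)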
- if "delete_job" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_job" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_job, - default_retry=self._method_configs["DeleteJob"].retry, - default_timeout=self._method_configs["DeleteJob"].timeout, - client_info=self._client_info, - ) - - request = jobs_pb2.DeleteJobRequest( - project_id=project_id, region=region, job_id=job_id, - ) - self._inner_api_calls["delete_job"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) diff --git a/google/cloud/dataproc_v1/gapic/job_controller_client_config.py b/google/cloud/dataproc_v1/gapic/job_controller_client_config.py deleted file mode 100644 index c04bef57..00000000 --- a/google/cloud/dataproc_v1/gapic/job_controller_client_config.py +++ /dev/null @@ -1,149 +0,0 @@ -config = { - "interfaces": { - "google.cloud.dataproc.v1.JobController": { - "retry_codes": { - "retry_policy_4_codes": [ - "DEADLINE_EXCEEDED", - "INTERNAL", - "UNAVAILABLE", - ], - "retry_policy_1_codes": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "retry_policy_6_codes": [ - "INTERNAL", - "DEADLINE_EXCEEDED", - "UNAVAILABLE", - ], - "no_retry_codes": [], - "retry_policy_3_codes": ["UNAVAILABLE"], - "retry_policy_2_codes": [ - "DEADLINE_EXCEEDED", - "INTERNAL", - "UNAVAILABLE", - ], - "no_retry_1_codes": [], - "retry_policy_5_codes": ["UNAVAILABLE"], - "retry_policy_7_codes": ["UNAVAILABLE"], - }, - "retry_params": { - "retry_policy_1_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 600000, - "total_timeout_millis": 600000, - }, - "retry_policy_3_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 600000, - "total_timeout_millis": 600000, - }, - "retry_policy_2_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 900000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 900000, - "total_timeout_millis": 900000, - }, - "retry_policy_6_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 300000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 300000, - "total_timeout_millis": 300000, - }, - "retry_policy_7_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 900000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 900000, - "total_timeout_millis": 900000, - }, - "retry_policy_5_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 300000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 300000, - "total_timeout_millis": 300000, - }, - "retry_policy_4_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 600000, - "total_timeout_millis": 600000, - }, - "no_retry_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - 
"initial_rpc_timeout_millis": 0, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 0, - "total_timeout_millis": 0, - }, - "no_retry_1_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 600000, - "total_timeout_millis": 600000, - }, - }, - "methods": { - "SubmitJob": { - "timeout_millis": 900000, - "retry_codes_name": "retry_policy_7_codes", - "retry_params_name": "retry_policy_7_params", - }, - "SubmitJobAsOperation": { - "timeout_millis": 900000, - "retry_codes_name": "retry_policy_7_codes", - "retry_params_name": "retry_policy_7_params", - }, - "GetJob": { - "timeout_millis": 900000, - "retry_codes_name": "retry_policy_2_codes", - "retry_params_name": "retry_policy_2_params", - }, - "ListJobs": { - "timeout_millis": 900000, - "retry_codes_name": "retry_policy_2_codes", - "retry_params_name": "retry_policy_2_params", - }, - "UpdateJob": { - "timeout_millis": 900000, - "retry_codes_name": "retry_policy_7_codes", - "retry_params_name": "retry_policy_7_params", - }, - "CancelJob": { - "timeout_millis": 900000, - "retry_codes_name": "retry_policy_2_codes", - "retry_params_name": "retry_policy_2_params", - }, - "DeleteJob": { - "timeout_millis": 900000, - "retry_codes_name": "retry_policy_7_codes", - "retry_params_name": "retry_policy_7_params", - }, - }, - } - } -} diff --git a/google/cloud/dataproc_v1/gapic/transports/__init__.py b/google/cloud/dataproc_v1/gapic/transports/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/google/cloud/dataproc_v1/gapic/transports/autoscaling_policy_service_grpc_transport.py b/google/cloud/dataproc_v1/gapic/transports/autoscaling_policy_service_grpc_transport.py deleted file mode 100644 index b2b4e646..00000000 --- a/google/cloud/dataproc_v1/gapic/transports/autoscaling_policy_service_grpc_transport.py +++ /dev/null @@ -1,179 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers - -from google.cloud.dataproc_v1.proto import autoscaling_policies_pb2_grpc - - -class AutoscalingPolicyServiceGrpcTransport(object): - """gRPC transport class providing stubs for - google.cloud.dataproc.v1 AutoscalingPolicyService API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) - - def __init__( - self, channel=None, credentials=None, address="dataproc.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. 
- credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive.", - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "autoscaling_policy_service_stub": autoscaling_policies_pb2_grpc.AutoscalingPolicyServiceStub( - channel - ), - } - - @classmethod - def create_channel( - cls, address="dataproc.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def create_autoscaling_policy(self): - """Return the gRPC stub for :meth:`AutoscalingPolicyServiceClient.create_autoscaling_policy`. - - Creates new autoscaling policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["autoscaling_policy_service_stub"].CreateAutoscalingPolicy - - @property - def update_autoscaling_policy(self): - """Return the gRPC stub for :meth:`AutoscalingPolicyServiceClient.update_autoscaling_policy`. - - Updates (replaces) autoscaling policy. - - Disabled check for update_mask, because all updates will be full - replacements. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["autoscaling_policy_service_stub"].UpdateAutoscalingPolicy - - @property - def get_autoscaling_policy(self): - """Return the gRPC stub for :meth:`AutoscalingPolicyServiceClient.get_autoscaling_policy`. - - Retrieves autoscaling policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["autoscaling_policy_service_stub"].GetAutoscalingPolicy - - @property - def list_autoscaling_policies(self): - """Return the gRPC stub for :meth:`AutoscalingPolicyServiceClient.list_autoscaling_policies`. - - Lists autoscaling policies in the project. 
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["autoscaling_policy_service_stub"].ListAutoscalingPolicies - - @property - def delete_autoscaling_policy(self): - """Return the gRPC stub for :meth:`AutoscalingPolicyServiceClient.delete_autoscaling_policy`. - - Deletes an autoscaling policy. It is an error to delete an autoscaling - policy that is in use by one or more clusters. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["autoscaling_policy_service_stub"].DeleteAutoscalingPolicy diff --git a/google/cloud/dataproc_v1/gapic/transports/cluster_controller_grpc_transport.py b/google/cloud/dataproc_v1/gapic/transports/cluster_controller_grpc_transport.py deleted file mode 100644 index 5fc36d86..00000000 --- a/google/cloud/dataproc_v1/gapic/transports/cluster_controller_grpc_transport.py +++ /dev/null @@ -1,204 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers -import google.api_core.operations_v1 - -from google.cloud.dataproc_v1.proto import clusters_pb2_grpc - - -class ClusterControllerGrpcTransport(object): - """gRPC transport class providing stubs for - google.cloud.dataproc.v1 ClusterController API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) - - def __init__( - self, channel=None, credentials=None, address="dataproc.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive.", - ) - - # Create the channel. 
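All of the deleted transports share this shape: accept a ready-made channel, or build one via create_channel with unlimited message sizes. A sketch of supplying a channel explicitly (the keepalive option is an illustrative extra, not something the old code set):

    from google.cloud import dataproc_v1
    from google.cloud.dataproc_v1.gapic.transports import (
        cluster_controller_grpc_transport,
    )

    # Use the transport's own helper so scopes and credential handling match
    # the default path, then add a custom channel option.
    ClusterTransport = cluster_controller_grpc_transport.ClusterControllerGrpcTransport
    channel = ClusterTransport.create_channel(
        address="dataproc.googleapis.com:443",
        options=[("grpc.keepalive_time_ms", 30000)],
    )
    transport = ClusterTransport(channel=channel)
    client = dataproc_v1.ClusterControllerClient(transport=transport)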
- if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "cluster_controller_stub": clusters_pb2_grpc.ClusterControllerStub(channel), - } - - # Because this API includes a method that returns a - # long-running operation (proto: google.longrunning.Operation), - # instantiate an LRO client. - self._operations_client = google.api_core.operations_v1.OperationsClient( - channel - ) - - @classmethod - def create_channel( - cls, address="dataproc.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def create_cluster(self): - """Return the gRPC stub for :meth:`ClusterControllerClient.create_cluster`. - - Creates a cluster in a project. The returned ``Operation.metadata`` - will be - `ClusterOperationMetadata `__. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_controller_stub"].CreateCluster - - @property - def update_cluster(self): - """Return the gRPC stub for :meth:`ClusterControllerClient.update_cluster`. - - Updates a cluster in a project. The returned ``Operation.metadata`` - will be - `ClusterOperationMetadata `__. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_controller_stub"].UpdateCluster - - @property - def delete_cluster(self): - """Return the gRPC stub for :meth:`ClusterControllerClient.delete_cluster`. - - Deletes a cluster in a project. The returned ``Operation.metadata`` - will be - `ClusterOperationMetadata `__. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_controller_stub"].DeleteCluster - - @property - def diagnose_cluster(self): - """Return the gRPC stub for :meth:`ClusterControllerClient.diagnose_cluster`. - - Gets cluster diagnostic information. The returned - ``Operation.metadata`` will be - `ClusterOperationMetadata `__. - After the operation completes, ``Operation.response`` contains - `DiagnoseClusterResults `__. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. 
- """ - return self._stubs["cluster_controller_stub"].DiagnoseCluster - - @property - def get_cluster(self): - """Return the gRPC stub for :meth:`ClusterControllerClient.get_cluster`. - - Gets the resource representation for a cluster in a project. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_controller_stub"].GetCluster - - @property - def list_clusters(self): - """Return the gRPC stub for :meth:`ClusterControllerClient.list_clusters`. - - Lists all regions/{region}/clusters in a project alphabetically. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_controller_stub"].ListClusters diff --git a/google/cloud/dataproc_v1/gapic/transports/job_controller_grpc_transport.py b/google/cloud/dataproc_v1/gapic/transports/job_controller_grpc_transport.py deleted file mode 100644 index 54a30763..00000000 --- a/google/cloud/dataproc_v1/gapic/transports/job_controller_grpc_transport.py +++ /dev/null @@ -1,212 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers -import google.api_core.operations_v1 - -from google.cloud.dataproc_v1.proto import jobs_pb2_grpc - - -class JobControllerGrpcTransport(object): - """gRPC transport class providing stubs for - google.cloud.dataproc.v1 JobController API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) - - def __init__( - self, channel=None, credentials=None, address="dataproc.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive.", - ) - - # Create the channel. 
- if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "job_controller_stub": jobs_pb2_grpc.JobControllerStub(channel), - } - - # Because this API includes a method that returns a - # long-running operation (proto: google.longrunning.Operation), - # instantiate an LRO client. - self._operations_client = google.api_core.operations_v1.OperationsClient( - channel - ) - - @classmethod - def create_channel( - cls, address="dataproc.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def submit_job(self): - """Return the gRPC stub for :meth:`JobControllerClient.submit_job`. - - Submits a job to a cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["job_controller_stub"].SubmitJob - - @property - def submit_job_as_operation(self): - """Return the gRPC stub for :meth:`JobControllerClient.submit_job_as_operation`. - - Submits job to a cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["job_controller_stub"].SubmitJobAsOperation - - @property - def get_job(self): - """Return the gRPC stub for :meth:`JobControllerClient.get_job`. - - Gets the resource representation for a job in a project. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["job_controller_stub"].GetJob - - @property - def list_jobs(self): - """Return the gRPC stub for :meth:`JobControllerClient.list_jobs`. - - Lists regions/{region}/jobs in a project. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["job_controller_stub"].ListJobs - - @property - def update_job(self): - """Return the gRPC stub for :meth:`JobControllerClient.update_job`. - - Updates a job in a project. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["job_controller_stub"].UpdateJob - - @property - def cancel_job(self): - """Return the gRPC stub for :meth:`JobControllerClient.cancel_job`. - - Starts a job cancellation request. 
To access the job resource after - cancellation, call - `regions/{region}/jobs.list `__ - or - `regions/{region}/jobs.get `__. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["job_controller_stub"].CancelJob - - @property - def delete_job(self): - """Return the gRPC stub for :meth:`JobControllerClient.delete_job`. - - Deletes the job from the project. If the job is active, the delete - fails, and the response returns ``FAILED_PRECONDITION``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["job_controller_stub"].DeleteJob diff --git a/google/cloud/dataproc_v1/gapic/transports/workflow_template_service_grpc_transport.py b/google/cloud/dataproc_v1/gapic/transports/workflow_template_service_grpc_transport.py deleted file mode 100644 index 705ab40b..00000000 --- a/google/cloud/dataproc_v1/gapic/transports/workflow_template_service_grpc_transport.py +++ /dev/null @@ -1,249 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers -import google.api_core.operations_v1 - -from google.cloud.dataproc_v1.proto import workflow_templates_pb2_grpc - - -class WorkflowTemplateServiceGrpcTransport(object): - """gRPC transport class providing stubs for - google.cloud.dataproc.v1 WorkflowTemplateService API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) - - def __init__( - self, channel=None, credentials=None, address="dataproc.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive.", - ) - - # Create the channel. 
- if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "workflow_template_service_stub": workflow_templates_pb2_grpc.WorkflowTemplateServiceStub( - channel - ), - } - - # Because this API includes a method that returns a - # long-running operation (proto: google.longrunning.Operation), - # instantiate an LRO client. - self._operations_client = google.api_core.operations_v1.OperationsClient( - channel - ) - - @classmethod - def create_channel( - cls, address="dataproc.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def instantiate_workflow_template(self): - """Return the gRPC stub for :meth:`WorkflowTemplateServiceClient.instantiate_workflow_template`. - - Instantiates a template and begins execution. - - The returned Operation can be used to track execution of workflow by - polling ``operations.get``. The Operation will complete when entire - workflow is finished. - - The running workflow can be aborted via ``operations.cancel``. This will - cause any inflight jobs to be cancelled and workflow-owned clusters to - be deleted. - - The ``Operation.metadata`` will be - `WorkflowMetadata `__. - Also see `Using - WorkflowMetadata `__. - - On successful completion, ``Operation.response`` will be ``Empty``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["workflow_template_service_stub"].InstantiateWorkflowTemplate - - @property - def instantiate_inline_workflow_template(self): - """Return the gRPC stub for :meth:`WorkflowTemplateServiceClient.instantiate_inline_workflow_template`. - - Instantiates a template and begins execution. - - This method is equivalent to executing the sequence - ``CreateWorkflowTemplate``, ``InstantiateWorkflowTemplate``, - ``DeleteWorkflowTemplate``. - - The returned Operation can be used to track execution of workflow by - polling ``operations.get``. The Operation will complete when entire - workflow is finished. - - The running workflow can be aborted via ``operations.cancel``. This will - cause any inflight jobs to be cancelled and workflow-owned clusters to - be deleted. - - The ``Operation.metadata`` will be - `WorkflowMetadata `__. - Also see `Using - WorkflowMetadata `__. - - On successful completion, ``Operation.response`` will be ``Empty``. 
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs[ - "workflow_template_service_stub" - ].InstantiateInlineWorkflowTemplate - - @property - def create_workflow_template(self): - """Return the gRPC stub for :meth:`WorkflowTemplateServiceClient.create_workflow_template`. - - Creates new workflow template. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["workflow_template_service_stub"].CreateWorkflowTemplate - - @property - def get_workflow_template(self): - """Return the gRPC stub for :meth:`WorkflowTemplateServiceClient.get_workflow_template`. - - Retrieves the latest workflow template. - - Can retrieve previously instantiated template by specifying optional - version parameter. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["workflow_template_service_stub"].GetWorkflowTemplate - - @property - def update_workflow_template(self): - """Return the gRPC stub for :meth:`WorkflowTemplateServiceClient.update_workflow_template`. - - Updates (replaces) workflow template. The updated template - must contain version that matches the current server version. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["workflow_template_service_stub"].UpdateWorkflowTemplate - - @property - def list_workflow_templates(self): - """Return the gRPC stub for :meth:`WorkflowTemplateServiceClient.list_workflow_templates`. - - Lists workflows that match the specified filter in the request. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["workflow_template_service_stub"].ListWorkflowTemplates - - @property - def delete_workflow_template(self): - """Return the gRPC stub for :meth:`WorkflowTemplateServiceClient.delete_workflow_template`. - - Deletes a workflow template. It does not cancel in-progress workflows. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["workflow_template_service_stub"].DeleteWorkflowTemplate diff --git a/google/cloud/dataproc_v1/gapic/workflow_template_service_client.py b/google/cloud/dataproc_v1/gapic/workflow_template_service_client.py deleted file mode 100644 index 417a1dd0..00000000 --- a/google/cloud/dataproc_v1/gapic/workflow_template_service_client.py +++ /dev/null @@ -1,949 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Accesses the google.cloud.dataproc.v1 WorkflowTemplateService API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.operation -import google.api_core.operations_v1 -import google.api_core.page_iterator -import google.api_core.path_template -import grpc - -from google.cloud.dataproc_v1.gapic import enums -from google.cloud.dataproc_v1.gapic import workflow_template_service_client_config -from google.cloud.dataproc_v1.gapic.transports import ( - workflow_template_service_grpc_transport, -) -from google.cloud.dataproc_v1.proto import autoscaling_policies_pb2 -from google.cloud.dataproc_v1.proto import autoscaling_policies_pb2_grpc -from google.cloud.dataproc_v1.proto import clusters_pb2 -from google.cloud.dataproc_v1.proto import clusters_pb2_grpc -from google.cloud.dataproc_v1.proto import jobs_pb2 -from google.cloud.dataproc_v1.proto import jobs_pb2_grpc -from google.cloud.dataproc_v1.proto import operations_pb2 as proto_operations_pb2 -from google.cloud.dataproc_v1.proto import workflow_templates_pb2 -from google.cloud.dataproc_v1.proto import workflow_templates_pb2_grpc -from google.longrunning import operations_pb2 as longrunning_operations_pb2 -from google.protobuf import duration_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-dataproc", -).version - - -class WorkflowTemplateServiceClient(object): - """ - The API interface for managing Workflow Templates in the - Dataproc API. - """ - - SERVICE_ADDRESS = "dataproc.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.cloud.dataproc.v1.WorkflowTemplateService" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - WorkflowTemplateServiceClient: The constructed client. 
- """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def location_path(cls, project, location): - """Return a fully-qualified location string.""" - return google.api_core.path_template.expand( - "projects/{project}/locations/{location}", - project=project, - location=location, - ) - - @classmethod - def region_path(cls, project, region): - """Return a fully-qualified region string.""" - return google.api_core.path_template.expand( - "projects/{project}/regions/{region}", project=project, region=region, - ) - - @classmethod - def workflow_template_path(cls, project, region, workflow_template): - """Return a fully-qualified workflow_template string.""" - return google.api_core.path_template.expand( - "projects/{project}/regions/{region}/workflowTemplates/{workflow_template}", - project=project, - region=region, - workflow_template=workflow_template, - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.WorkflowTemplateServiceGrpcTransport, - Callable[[~.Credentials, type], ~.WorkflowTemplateServiceGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. 
- if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = workflow_template_service_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=workflow_template_service_grpc_transport.WorkflowTemplateServiceGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = workflow_template_service_grpc_transport.WorkflowTemplateServiceGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials, - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME], - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def instantiate_workflow_template( - self, - name, - version=None, - request_id=None, - parameters=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Instantiates a template and begins execution. - - The returned Operation can be used to track execution of workflow by - polling ``operations.get``. The Operation will complete when entire - workflow is finished. - - The running workflow can be aborted via ``operations.cancel``. This will - cause any inflight jobs to be cancelled and workflow-owned clusters to - be deleted. - - The ``Operation.metadata`` will be - `WorkflowMetadata `__. - Also see `Using - WorkflowMetadata `__. - - On successful completion, ``Operation.response`` will be ``Empty``. - - Example: - >>> from google.cloud import dataproc_v1 - >>> - >>> client = dataproc_v1.WorkflowTemplateServiceClient() - >>> - >>> # TODO: Initialize `name`: - >>> name = '' - >>> - >>> response = client.instantiate_workflow_template(name) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - name (str): Required. 
The resource name of the workflow template, as described - in https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates.instantiate``, the resource - name of the template has the following format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For ``projects.locations.workflowTemplates.instantiate``, the - resource name of the template has the following format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - version (int): Optional. The version of workflow template to instantiate. If specified, - the workflow will be instantiated only if the current version of - the workflow template has the supplied version. - - This option cannot be used to instantiate a previous version of - workflow template. - request_id (str): Optional. A tag that prevents multiple concurrent workflow instances - with the same tag from running. This mitigates risk of concurrent - instances started due to retries. - - It is recommended to always set this value to a - `UUID `__. - - The tag must contain only letters (a-z, A-Z), numbers (0-9), underscores - (_), and hyphens (-). The maximum length is 40 characters. - parameters (dict[str -> str]): Optional. Map from parameter names to values that should be used for those - parameters. Values may not exceed 100 characters. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "instantiate_workflow_template" not in self._inner_api_calls: - self._inner_api_calls[ - "instantiate_workflow_template" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.instantiate_workflow_template, - default_retry=self._method_configs["InstantiateWorkflowTemplate"].retry, - default_timeout=self._method_configs[ - "InstantiateWorkflowTemplate" - ].timeout, - client_info=self._client_info, - ) - - request = workflow_templates_pb2.InstantiateWorkflowTemplateRequest( - name=name, version=version, request_id=request_id, parameters=parameters, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["instantiate_workflow_template"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=workflow_templates_pb2.WorkflowMetadata, - ) - - def instantiate_inline_workflow_template( - self, - parent, - template, - request_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Instantiates a template and begins execution. - - This method is equivalent to executing the sequence - ``CreateWorkflowTemplate``, ``InstantiateWorkflowTemplate``, - ``DeleteWorkflowTemplate``. - - The returned Operation can be used to track execution of workflow by - polling ``operations.get``. The Operation will complete when entire - workflow is finished. - - The running workflow can be aborted via ``operations.cancel``. This will - cause any inflight jobs to be cancelled and workflow-owned clusters to - be deleted. - - The ``Operation.metadata`` will be - `WorkflowMetadata `__. - Also see `Using - WorkflowMetadata `__. - - On successful completion, ``Operation.response`` will be ``Empty``. - - Example: - >>> from google.cloud import dataproc_v1 - >>> - >>> client = dataproc_v1.WorkflowTemplateServiceClient() - >>> - >>> parent = client.region_path('[PROJECT]', '[REGION]') - >>> - >>> # TODO: Initialize `template`: - >>> template = {} - >>> - >>> response = client.instantiate_inline_workflow_template(parent, template) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - parent (str): Required. The resource name of the region or location, as described - in https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates,instantiateinline``, the - resource name of the region has the following format: - ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.workflowTemplates.instantiateinline``, the - resource name of the location has the following format: - ``projects/{project_id}/locations/{location}`` - template (Union[dict, ~google.cloud.dataproc_v1.types.WorkflowTemplate]): Required. The workflow template to instantiate. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1.types.WorkflowTemplate` - request_id (str): Optional. A tag that prevents multiple concurrent workflow instances - with the same tag from running. 
This mitigates risk of concurrent - instances started due to retries. - - It is recommended to always set this value to a - `UUID `__. - - The tag must contain only letters (a-z, A-Z), numbers (0-9), underscores - (_), and hyphens (-). The maximum length is 40 characters. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "instantiate_inline_workflow_template" not in self._inner_api_calls: - self._inner_api_calls[ - "instantiate_inline_workflow_template" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.instantiate_inline_workflow_template, - default_retry=self._method_configs[ - "InstantiateInlineWorkflowTemplate" - ].retry, - default_timeout=self._method_configs[ - "InstantiateInlineWorkflowTemplate" - ].timeout, - client_info=self._client_info, - ) - - request = workflow_templates_pb2.InstantiateInlineWorkflowTemplateRequest( - parent=parent, template=template, request_id=request_id, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["instantiate_inline_workflow_template"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=workflow_templates_pb2.WorkflowMetadata, - ) - - def create_workflow_template( - self, - parent, - template, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates new workflow template. - - Example: - >>> from google.cloud import dataproc_v1 - >>> - >>> client = dataproc_v1.WorkflowTemplateServiceClient() - >>> - >>> parent = client.region_path('[PROJECT]', '[REGION]') - >>> - >>> # TODO: Initialize `template`: - >>> template = {} - >>> - >>> response = client.create_workflow_template(parent, template) - - Args: - parent (str): Required. The resource name of the region or location, as described - in https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates,create``, the resource name - of the region has the following format: - ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.workflowTemplates.create``, the resource - name of the location has the following format: - ``projects/{project_id}/locations/{location}`` - template (Union[dict, ~google.cloud.dataproc_v1.types.WorkflowTemplate]): Required. The Dataproc workflow template to create. 
- - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1.types.WorkflowTemplate` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1.types.WorkflowTemplate` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "create_workflow_template" not in self._inner_api_calls: - self._inner_api_calls[ - "create_workflow_template" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_workflow_template, - default_retry=self._method_configs["CreateWorkflowTemplate"].retry, - default_timeout=self._method_configs["CreateWorkflowTemplate"].timeout, - client_info=self._client_info, - ) - - request = workflow_templates_pb2.CreateWorkflowTemplateRequest( - parent=parent, template=template, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_workflow_template"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_workflow_template( - self, - name, - version=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Retrieves the latest workflow template. - - Can retrieve previously instantiated template by specifying optional - version parameter. - - Example: - >>> from google.cloud import dataproc_v1 - >>> - >>> client = dataproc_v1.WorkflowTemplateServiceClient() - >>> - >>> # TODO: Initialize `name`: - >>> name = '' - >>> - >>> response = client.get_workflow_template(name) - - Args: - name (str): Required. The resource name of the workflow template, as described - in https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates.get``, the resource name of - the template has the following format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For ``projects.locations.workflowTemplates.get``, the resource name - of the template has the following format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - version (int): Optional. The version of workflow template to retrieve. Only previously - instantiated versions can be retrieved. - - If unspecified, retrieves the current version. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. 
Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1.types.WorkflowTemplate` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_workflow_template" not in self._inner_api_calls: - self._inner_api_calls[ - "get_workflow_template" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_workflow_template, - default_retry=self._method_configs["GetWorkflowTemplate"].retry, - default_timeout=self._method_configs["GetWorkflowTemplate"].timeout, - client_info=self._client_info, - ) - - request = workflow_templates_pb2.GetWorkflowTemplateRequest( - name=name, version=version, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_workflow_template"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def update_workflow_template( - self, - template, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates (replaces) workflow template. The updated template - must contain version that matches the current server version. - - Example: - >>> from google.cloud import dataproc_v1 - >>> - >>> client = dataproc_v1.WorkflowTemplateServiceClient() - >>> - >>> # TODO: Initialize `template`: - >>> template = {} - >>> - >>> response = client.update_workflow_template(template) - - Args: - template (Union[dict, ~google.cloud.dataproc_v1.types.WorkflowTemplate]): Required. The updated workflow template. - - The ``template.version`` field must match the current version. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1.types.WorkflowTemplate` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1.types.WorkflowTemplate` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "update_workflow_template" not in self._inner_api_calls: - self._inner_api_calls[ - "update_workflow_template" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_workflow_template, - default_retry=self._method_configs["UpdateWorkflowTemplate"].retry, - default_timeout=self._method_configs["UpdateWorkflowTemplate"].timeout, - client_info=self._client_info, - ) - - request = workflow_templates_pb2.UpdateWorkflowTemplateRequest( - template=template, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("template.name", template.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_workflow_template"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_workflow_templates( - self, - parent, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists workflows that match the specified filter in the request. - - Example: - >>> from google.cloud import dataproc_v1 - >>> - >>> client = dataproc_v1.WorkflowTemplateServiceClient() - >>> - >>> parent = client.region_path('[PROJECT]', '[REGION]') - >>> - >>> # Iterate over all results - >>> for element in client.list_workflow_templates(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_workflow_templates(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The resource name of the region or location, as described - in https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates,list``, the resource name of - the region has the following format: - ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.workflowTemplates.list``, the resource name - of the location has the following format: - ``projects/{project_id}/locations/{location}`` - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.dataproc_v1.types.WorkflowTemplate` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "list_workflow_templates" not in self._inner_api_calls: - self._inner_api_calls[ - "list_workflow_templates" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_workflow_templates, - default_retry=self._method_configs["ListWorkflowTemplates"].retry, - default_timeout=self._method_configs["ListWorkflowTemplates"].timeout, - client_info=self._client_info, - ) - - request = workflow_templates_pb2.ListWorkflowTemplatesRequest( - parent=parent, page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_workflow_templates"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="templates", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def delete_workflow_template( - self, - name, - version=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes a workflow template. It does not cancel in-progress workflows. - - Example: - >>> from google.cloud import dataproc_v1 - >>> - >>> client = dataproc_v1.WorkflowTemplateServiceClient() - >>> - >>> # TODO: Initialize `name`: - >>> name = '' - >>> - >>> client.delete_workflow_template(name) - - Args: - name (str): Required. The resource name of the workflow template, as described - in https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates.delete``, the resource name - of the template has the following format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For ``projects.locations.workflowTemplates.instantiate``, the - resource name of the template has the following format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - version (int): Optional. The version of workflow template to delete. If specified, - will only delete the template if the current server version matches - specified version. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "delete_workflow_template" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_workflow_template" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_workflow_template, - default_retry=self._method_configs["DeleteWorkflowTemplate"].retry, - default_timeout=self._method_configs["DeleteWorkflowTemplate"].timeout, - client_info=self._client_info, - ) - - request = workflow_templates_pb2.DeleteWorkflowTemplateRequest( - name=name, version=version, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_workflow_template"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) diff --git a/google/cloud/dataproc_v1/gapic/workflow_template_service_client_config.py b/google/cloud/dataproc_v1/gapic/workflow_template_service_client_config.py deleted file mode 100644 index ec0aea38..00000000 --- a/google/cloud/dataproc_v1/gapic/workflow_template_service_client_config.py +++ /dev/null @@ -1,81 +0,0 @@ -config = { - "interfaces": { - "google.cloud.dataproc.v1.WorkflowTemplateService": { - "retry_codes": { - "retry_policy_4_codes": [ - "DEADLINE_EXCEEDED", - "INTERNAL", - "UNAVAILABLE", - ], - "no_retry_codes": [], - "retry_policy_3_codes": ["UNAVAILABLE"], - }, - "retry_params": { - "retry_policy_3_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 600000, - "total_timeout_millis": 600000, - }, - "retry_policy_4_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 600000, - "total_timeout_millis": 600000, - }, - "no_retry_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 0, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 0, - "total_timeout_millis": 0, - }, - }, - "methods": { - "InstantiateWorkflowTemplate": { - "timeout_millis": 600000, - "retry_codes_name": "retry_policy_3_codes", - "retry_params_name": "retry_policy_3_params", - }, - "InstantiateInlineWorkflowTemplate": { - "timeout_millis": 600000, - "retry_codes_name": "retry_policy_3_codes", - "retry_params_name": "retry_policy_3_params", - }, - "CreateWorkflowTemplate": { - "timeout_millis": 600000, - "retry_codes_name": "retry_policy_3_codes", - "retry_params_name": "retry_policy_3_params", - }, - "GetWorkflowTemplate": { - "timeout_millis": 600000, - "retry_codes_name": "retry_policy_4_codes", - "retry_params_name": "retry_policy_4_params", - }, - "UpdateWorkflowTemplate": { - "timeout_millis": 600000, - "retry_codes_name": "retry_policy_3_codes", - "retry_params_name": "retry_policy_3_params", - }, - "ListWorkflowTemplates": { - "timeout_millis": 600000, - "retry_codes_name": "retry_policy_4_codes", - "retry_params_name": "retry_policy_4_params", - }, - "DeleteWorkflowTemplate": { - "timeout_millis": 600000, - "retry_codes_name": "retry_policy_3_codes", - "retry_params_name": "retry_policy_3_params", - }, - }, - } - } -} diff --git a/google/cloud/dataproc_v1/proto/__init__.py 
b/google/cloud/dataproc_v1/proto/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/google/cloud/dataproc_v1/proto/autoscaling_policies_pb2.py b/google/cloud/dataproc_v1/proto/autoscaling_policies_pb2.py deleted file mode 100644 index 22c0273a..00000000 --- a/google/cloud/dataproc_v1/proto/autoscaling_policies_pb2.py +++ /dev/null @@ -1,1218 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/dataproc_v1/proto/autoscaling_policies.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/dataproc_v1/proto/autoscaling_policies.proto", - package="google.cloud.dataproc.v1", - syntax="proto3", - serialized_options=b"\n\034com.google.cloud.dataproc.v1B\030AutoscalingPoliciesProtoP\001Z@google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc\352AE\n\036dataproc.googleapis.com/Region\022#projects/{project}/regions/{region}", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n9google/cloud/dataproc_v1/proto/autoscaling_policies.proto\x12\x18google.cloud.dataproc.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto"\xa8\x04\n\x11\x41utoscalingPolicy\x12\n\n\x02id\x18\x01 \x01(\t\x12\x11\n\x04name\x18\x02 \x01(\tB\x03\xe0\x41\x03\x12S\n\x0f\x62\x61sic_algorithm\x18\x03 \x01(\x0b\x32\x33.google.cloud.dataproc.v1.BasicAutoscalingAlgorithmB\x03\xe0\x41\x02H\x00\x12Z\n\rworker_config\x18\x04 \x01(\x0b\x32>.google.cloud.dataproc.v1.InstanceGroupAutoscalingPolicyConfigB\x03\xe0\x41\x02\x12\x64\n\x17secondary_worker_config\x18\x05 \x01(\x0b\x32>.google.cloud.dataproc.v1.InstanceGroupAutoscalingPolicyConfigB\x03\xe0\x41\x01:\xcf\x01\xea\x41\xcb\x01\n)dataproc.googleapis.com/AutoscalingPolicy\x12Pprojects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}\x12Lprojects/{project}/regions/{region}/autoscalingPolicies/{autoscaling_policy}B\x0b\n\talgorithm"\xa4\x01\n\x19\x42\x61sicAutoscalingAlgorithm\x12N\n\x0byarn_config\x18\x01 \x01(\x0b\x32\x34.google.cloud.dataproc.v1.BasicYarnAutoscalingConfigB\x03\xe0\x41\x02\x12\x37\n\x0f\x63ooldown_period\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x01"\xf9\x01\n\x1a\x42\x61sicYarnAutoscalingConfig\x12\x45\n\x1dgraceful_decommission_timeout\x18\x05 \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x02\x12\x1c\n\x0fscale_up_factor\x18\x01 \x01(\x01\x42\x03\xe0\x41\x02\x12\x1e\n\x11scale_down_factor\x18\x02 \x01(\x01\x42\x03\xe0\x41\x02\x12)\n\x1cscale_up_min_worker_fraction\x18\x03 \x01(\x01\x42\x03\xe0\x41\x01\x12+\n\x1escale_down_min_worker_fraction\x18\x04 
\x01(\x01\x42\x03\xe0\x41\x01"s\n$InstanceGroupAutoscalingPolicyConfig\x12\x1a\n\rmin_instances\x18\x01 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1a\n\rmax_instances\x18\x02 \x01(\x05\x42\x03\xe0\x41\x02\x12\x13\n\x06weight\x18\x03 \x01(\x05\x42\x03\xe0\x41\x01"\xa5\x01\n\x1e\x43reateAutoscalingPolicyRequest\x12\x41\n\x06parent\x18\x01 \x01(\tB1\xe0\x41\x02\xfa\x41+\x12)dataproc.googleapis.com/AutoscalingPolicy\x12@\n\x06policy\x18\x02 \x01(\x0b\x32+.google.cloud.dataproc.v1.AutoscalingPolicyB\x03\xe0\x41\x02"^\n\x1bGetAutoscalingPolicyRequest\x12?\n\x04name\x18\x01 \x01(\tB1\xe0\x41\x02\xfa\x41+\n)dataproc.googleapis.com/AutoscalingPolicy"b\n\x1eUpdateAutoscalingPolicyRequest\x12@\n\x06policy\x18\x01 \x01(\x0b\x32+.google.cloud.dataproc.v1.AutoscalingPolicyB\x03\xe0\x41\x02"a\n\x1e\x44\x65leteAutoscalingPolicyRequest\x12?\n\x04name\x18\x01 \x01(\tB1\xe0\x41\x02\xfa\x41+\n)dataproc.googleapis.com/AutoscalingPolicy"\x94\x01\n\x1eListAutoscalingPoliciesRequest\x12\x41\n\x06parent\x18\x01 \x01(\tB1\xe0\x41\x02\xfa\x41+\x12)dataproc.googleapis.com/AutoscalingPolicy\x12\x16\n\tpage_size\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x17\n\npage_token\x18\x03 \x01(\tB\x03\xe0\x41\x01"\x83\x01\n\x1fListAutoscalingPoliciesResponse\x12\x42\n\x08policies\x18\x01 \x03(\x0b\x32+.google.cloud.dataproc.v1.AutoscalingPolicyB\x03\xe0\x41\x03\x12\x1c\n\x0fnext_page_token\x18\x02 \x01(\tB\x03\xe0\x41\x03\x32\xae\x0b\n\x18\x41utoscalingPolicyService\x12\x9c\x02\n\x17\x43reateAutoscalingPolicy\x12\x38.google.cloud.dataproc.v1.CreateAutoscalingPolicyRequest\x1a+.google.cloud.dataproc.v1.AutoscalingPolicy"\x99\x01\x82\xd3\xe4\x93\x02\x82\x01"7/v1/{parent=projects/*/locations/*}/autoscalingPolicies:\x06policyZ?"5/v1/{parent=projects/*/regions/*}/autoscalingPolicies:\x06policy\xda\x41\rparent,policy\x12\xa3\x02\n\x17UpdateAutoscalingPolicy\x12\x38.google.cloud.dataproc.v1.UpdateAutoscalingPolicyRequest\x1a+.google.cloud.dataproc.v1.AutoscalingPolicy"\xa0\x01\x82\xd3\xe4\x93\x02\x90\x01\x1a>/v1/{policy.name=projects/*/locations/*/autoscalingPolicies/*}:\x06policyZF\x1a`__ for more - information. Bounds: [0.0, 1.0]. - scale_down_factor: - Required. Fraction of average YARN pending memory in the last - cooldown period for which to remove workers. A scale-down - factor of 1 will result in scaling down so that there is no - available memory remaining after the update (more aggressive - scaling). A scale-down factor of 0 disables removing workers, - which can be beneficial for autoscaling a single job. See `How - autoscaling works - `__ for more - information. Bounds: [0.0, 1.0]. - scale_up_min_worker_fraction: - Optional. Minimum scale-up threshold as a fraction of total - cluster size before scaling occurs. For example, in a - 20-worker cluster, a threshold of 0.1 means the autoscaler - must recommend at least a 2-worker scale-up for the cluster to - scale. A threshold of 0 means the autoscaler will scale up on - any recommended change. Bounds: [0.0, 1.0]. Default: 0.0. - scale_down_min_worker_fraction: - Optional. Minimum scale-down threshold as a fraction of total - cluster size before scaling occurs. For example, in a - 20-worker cluster, a threshold of 0.1 means the autoscaler - must recommend at least a 2 worker scale-down for the cluster - to scale. A threshold of 0 means the autoscaler will scale - down on any recommended change. Bounds: [0.0, 1.0]. Default: - 0.0. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.BasicYarnAutoscalingConfig) - }, -) -_sym_db.RegisterMessage(BasicYarnAutoscalingConfig) - -InstanceGroupAutoscalingPolicyConfig = _reflection.GeneratedProtocolMessageType( - "InstanceGroupAutoscalingPolicyConfig", - (_message.Message,), - { - "DESCRIPTOR": _INSTANCEGROUPAUTOSCALINGPOLICYCONFIG, - "__module__": "google.cloud.dataproc_v1.proto.autoscaling_policies_pb2", - "__doc__": """Configuration for the size bounds of an instance group, including its - proportional size to other groups. - - Attributes: - min_instances: - Optional. Minimum number of instances for this group. Primary - workers - Bounds: [2, max_instances]. Default: 2. Secondary - workers - Bounds: [0, max_instances]. Default: 0. - max_instances: - Required. Maximum number of instances for this group. Required - for primary workers. Note that by default, clusters will not - use secondary workers. Required for secondary workers if the - minimum secondary instances is set. Primary workers - Bounds: - [min_instances, ). Secondary workers - Bounds: [min_instances, - ). Default: 0. - weight: - Optional. Weight for the instance group, which is used to - determine the fraction of total workers in the cluster from - this instance group. For example, if primary workers have - weight 2, and secondary workers have weight 1, the cluster - will have approximately 2 primary workers for each secondary - worker. The cluster may not reach the specified balance if - constrained by min/max bounds or other autoscaling settings. - For example, if ``max_instances`` for secondary workers is 0, - then only primary workers will be added. The cluster can also - be out of balance when created. If weight is not set on any - instance group, the cluster will default to equal weight for - all groups: the cluster will attempt to maintain an equal - number of workers in each group within the configured size - bounds for each group. If weight is set for one group only, - the cluster will default to zero weight on the unset group. - For example if weight is set only on primary workers, the - cluster will use primary workers only and no secondary - workers. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.InstanceGroupAutoscalingPolicyConfig) - }, -) -_sym_db.RegisterMessage(InstanceGroupAutoscalingPolicyConfig) - -CreateAutoscalingPolicyRequest = _reflection.GeneratedProtocolMessageType( - "CreateAutoscalingPolicyRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATEAUTOSCALINGPOLICYREQUEST, - "__module__": "google.cloud.dataproc_v1.proto.autoscaling_policies_pb2", - "__doc__": """A request to create an autoscaling policy. - - Attributes: - parent: - Required. The “resource name” of the region or location, as - described in - https://cloud.google.com/apis/design/resource_names. - For - ``projects.regions.autoscalingPolicies.create``, the resource - name of the region has the following format: - ``projects/{project_id}/regions/{region}`` - For - ``projects.locations.autoscalingPolicies.create``, the - resource name of the location has the following format: - ``projects/{project_id}/locations/{location}`` - policy: - Required. The autoscaling policy to create. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.CreateAutoscalingPolicyRequest) - }, -) -_sym_db.RegisterMessage(CreateAutoscalingPolicyRequest) - -GetAutoscalingPolicyRequest = _reflection.GeneratedProtocolMessageType( - "GetAutoscalingPolicyRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETAUTOSCALINGPOLICYREQUEST, - "__module__": "google.cloud.dataproc_v1.proto.autoscaling_policies_pb2", - "__doc__": """A request to fetch an autoscaling policy. - - Attributes: - name: - Required. The “resource name” of the autoscaling policy, as - described in - https://cloud.google.com/apis/design/resource_names. - For - ``projects.regions.autoscalingPolicies.get``, the resource - name of the policy has the following format: ``projects/ - {project_id}/regions/{region}/autoscalingPolicies/{policy_id}` - ` - For ``projects.locations.autoscalingPolicies.get``, the - resource name of the policy has the following format: `` - projects/{project_id}/locations/{location}/autoscalingPolicies - /{policy_id}`` - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.GetAutoscalingPolicyRequest) - }, -) -_sym_db.RegisterMessage(GetAutoscalingPolicyRequest) - -UpdateAutoscalingPolicyRequest = _reflection.GeneratedProtocolMessageType( - "UpdateAutoscalingPolicyRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEAUTOSCALINGPOLICYREQUEST, - "__module__": "google.cloud.dataproc_v1.proto.autoscaling_policies_pb2", - "__doc__": """A request to update an autoscaling policy. - - Attributes: - policy: - Required. The updated autoscaling policy. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.UpdateAutoscalingPolicyRequest) - }, -) -_sym_db.RegisterMessage(UpdateAutoscalingPolicyRequest) - -DeleteAutoscalingPolicyRequest = _reflection.GeneratedProtocolMessageType( - "DeleteAutoscalingPolicyRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETEAUTOSCALINGPOLICYREQUEST, - "__module__": "google.cloud.dataproc_v1.proto.autoscaling_policies_pb2", - "__doc__": """A request to delete an autoscaling policy. Autoscaling policies in - use by one or more clusters will not be deleted. - - Attributes: - name: - Required. The “resource name” of the autoscaling policy, as - described in - https://cloud.google.com/apis/design/resource_names. - For - ``projects.regions.autoscalingPolicies.delete``, the resource - name of the policy has the following format: ``projects/{pr - oject_id}/regions/{region}/autoscalingPolicies/{policy_id}`` - - For ``projects.locations.autoscalingPolicies.delete``, the - resource name of the policy has the following format: `` - projects/{project_id}/locations/{location}/autoscalingPolicies - /{policy_id}`` - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.DeleteAutoscalingPolicyRequest) - }, -) -_sym_db.RegisterMessage(DeleteAutoscalingPolicyRequest) - -ListAutoscalingPoliciesRequest = _reflection.GeneratedProtocolMessageType( - "ListAutoscalingPoliciesRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTAUTOSCALINGPOLICIESREQUEST, - "__module__": "google.cloud.dataproc_v1.proto.autoscaling_policies_pb2", - "__doc__": """A request to list autoscaling policies in a project. - - Attributes: - parent: - Required. The “resource name” of the region or location, as - described in - https://cloud.google.com/apis/design/resource_names. 
- For - ``projects.regions.autoscalingPolicies.list``, the resource - name of the region has the following format: - ``projects/{project_id}/regions/{region}`` - For - ``projects.locations.autoscalingPolicies.list``, the resource - name of the location has the following format: - ``projects/{project_id}/locations/{location}`` - page_size: - Optional. The maximum number of results to return in each - response. Must be less than or equal to 1000. Defaults to 100. - page_token: - Optional. The page token, returned by a previous call, to - request the next page of results. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ListAutoscalingPoliciesRequest) - }, -) -_sym_db.RegisterMessage(ListAutoscalingPoliciesRequest) - -ListAutoscalingPoliciesResponse = _reflection.GeneratedProtocolMessageType( - "ListAutoscalingPoliciesResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTAUTOSCALINGPOLICIESRESPONSE, - "__module__": "google.cloud.dataproc_v1.proto.autoscaling_policies_pb2", - "__doc__": """A response to a request to list autoscaling policies in a project. - - Attributes: - policies: - Output only. Autoscaling policies list. - next_page_token: - Output only. This token is included in the response if there - are more results to fetch. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ListAutoscalingPoliciesResponse) - }, -) -_sym_db.RegisterMessage(ListAutoscalingPoliciesResponse) - - -DESCRIPTOR._options = None -_AUTOSCALINGPOLICY.fields_by_name["name"]._options = None -_AUTOSCALINGPOLICY.fields_by_name["basic_algorithm"]._options = None -_AUTOSCALINGPOLICY.fields_by_name["worker_config"]._options = None -_AUTOSCALINGPOLICY.fields_by_name["secondary_worker_config"]._options = None -_AUTOSCALINGPOLICY._options = None -_BASICAUTOSCALINGALGORITHM.fields_by_name["yarn_config"]._options = None -_BASICAUTOSCALINGALGORITHM.fields_by_name["cooldown_period"]._options = None -_BASICYARNAUTOSCALINGCONFIG.fields_by_name[ - "graceful_decommission_timeout" -]._options = None -_BASICYARNAUTOSCALINGCONFIG.fields_by_name["scale_up_factor"]._options = None -_BASICYARNAUTOSCALINGCONFIG.fields_by_name["scale_down_factor"]._options = None -_BASICYARNAUTOSCALINGCONFIG.fields_by_name[ - "scale_up_min_worker_fraction" -]._options = None -_BASICYARNAUTOSCALINGCONFIG.fields_by_name[ - "scale_down_min_worker_fraction" -]._options = None -_INSTANCEGROUPAUTOSCALINGPOLICYCONFIG.fields_by_name["min_instances"]._options = None -_INSTANCEGROUPAUTOSCALINGPOLICYCONFIG.fields_by_name["max_instances"]._options = None -_INSTANCEGROUPAUTOSCALINGPOLICYCONFIG.fields_by_name["weight"]._options = None -_CREATEAUTOSCALINGPOLICYREQUEST.fields_by_name["parent"]._options = None -_CREATEAUTOSCALINGPOLICYREQUEST.fields_by_name["policy"]._options = None -_GETAUTOSCALINGPOLICYREQUEST.fields_by_name["name"]._options = None -_UPDATEAUTOSCALINGPOLICYREQUEST.fields_by_name["policy"]._options = None -_DELETEAUTOSCALINGPOLICYREQUEST.fields_by_name["name"]._options = None -_LISTAUTOSCALINGPOLICIESREQUEST.fields_by_name["parent"]._options = None -_LISTAUTOSCALINGPOLICIESREQUEST.fields_by_name["page_size"]._options = None -_LISTAUTOSCALINGPOLICIESREQUEST.fields_by_name["page_token"]._options = None -_LISTAUTOSCALINGPOLICIESRESPONSE.fields_by_name["policies"]._options = None -_LISTAUTOSCALINGPOLICIESRESPONSE.fields_by_name["next_page_token"]._options = None - -_AUTOSCALINGPOLICYSERVICE = _descriptor.ServiceDescriptor( - name="AutoscalingPolicyService", - 
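# A paging sketch for ListAutoscalingPoliciesRequest/Response, assuming the
# generated pager surface: iterating the returned pager follows
# next_page_token transparently, so page_size only controls per-response size.
from google.cloud import dataproc_v1

client = dataproc_v1.AutoscalingPolicyServiceClient()
pager = client.list_autoscaling_policies(
    request={"parent": "projects/my-project/regions/us-central1", "page_size": 100}
)
for policy in pager:
    print(policy.name)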
full_name="google.cloud.dataproc.v1.AutoscalingPolicyService", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\027dataproc.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", - create_key=_descriptor._internal_create_key, - serialized_start=2103, - serialized_end=3557, - methods=[ - _descriptor.MethodDescriptor( - name="CreateAutoscalingPolicy", - full_name="google.cloud.dataproc.v1.AutoscalingPolicyService.CreateAutoscalingPolicy", - index=0, - containing_service=None, - input_type=_CREATEAUTOSCALINGPOLICYREQUEST, - output_type=_AUTOSCALINGPOLICY, - serialized_options=b'\202\323\344\223\002\202\001"7/v1/{parent=projects/*/locations/*}/autoscalingPolicies:\006policyZ?"5/v1/{parent=projects/*/regions/*}/autoscalingPolicies:\006policy\332A\rparent,policy', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="UpdateAutoscalingPolicy", - full_name="google.cloud.dataproc.v1.AutoscalingPolicyService.UpdateAutoscalingPolicy", - index=1, - containing_service=None, - input_type=_UPDATEAUTOSCALINGPOLICYREQUEST, - output_type=_AUTOSCALINGPOLICY, - serialized_options=b"\202\323\344\223\002\220\001\032>/v1/{policy.name=projects/*/locations/*/autoscalingPolicies/*}:\006policyZF\032\n\x07metrics\x18\t \x01(\x0b\x32(.google.cloud.dataproc.v1.ClusterMetricsB\x03\xe0\x41\x03\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\x92\x07\n\rClusterConfig\x12\x1a\n\rconfig_bucket\x18\x01 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0btemp_bucket\x18\x02 \x01(\tB\x03\xe0\x41\x01\x12K\n\x12gce_cluster_config\x18\x08 \x01(\x0b\x32*.google.cloud.dataproc.v1.GceClusterConfigB\x03\xe0\x41\x01\x12I\n\rmaster_config\x18\t \x01(\x0b\x32-.google.cloud.dataproc.v1.InstanceGroupConfigB\x03\xe0\x41\x01\x12I\n\rworker_config\x18\n \x01(\x0b\x32-.google.cloud.dataproc.v1.InstanceGroupConfigB\x03\xe0\x41\x01\x12S\n\x17secondary_worker_config\x18\x0c \x01(\x0b\x32-.google.cloud.dataproc.v1.InstanceGroupConfigB\x03\xe0\x41\x01\x12\x46\n\x0fsoftware_config\x18\r \x01(\x0b\x32(.google.cloud.dataproc.v1.SoftwareConfigB\x03\xe0\x41\x01\x12W\n\x16initialization_actions\x18\x0b \x03(\x0b\x32\x32.google.cloud.dataproc.v1.NodeInitializationActionB\x03\xe0\x41\x01\x12J\n\x11\x65ncryption_config\x18\x0f \x01(\x0b\x32*.google.cloud.dataproc.v1.EncryptionConfigB\x03\xe0\x41\x01\x12L\n\x12\x61utoscaling_config\x18\x12 \x01(\x0b\x32+.google.cloud.dataproc.v1.AutoscalingConfigB\x03\xe0\x41\x01\x12\x46\n\x0fsecurity_config\x18\x10 \x01(\x0b\x32(.google.cloud.dataproc.v1.SecurityConfigB\x03\xe0\x41\x01\x12H\n\x10lifecycle_config\x18\x11 \x01(\x0b\x32).google.cloud.dataproc.v1.LifecycleConfigB\x03\xe0\x41\x01\x12\x46\n\x0f\x65ndpoint_config\x18\x13 \x01(\x0b\x32(.google.cloud.dataproc.v1.EndpointConfigB\x03\xe0\x41\x01"\xba\x01\n\x0e\x45ndpointConfig\x12P\n\nhttp_ports\x18\x01 \x03(\x0b\x32\x37.google.cloud.dataproc.v1.EndpointConfig.HttpPortsEntryB\x03\xe0\x41\x03\x12$\n\x17\x65nable_http_port_access\x18\x02 \x01(\x08\x42\x03\xe0\x41\x01\x1a\x30\n\x0eHttpPortsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01",\n\x11\x41utoscalingConfig\x12\x17\n\npolicy_uri\x18\x01 \x01(\tB\x03\xe0\x41\x01"4\n\x10\x45ncryptionConfig\x12 \n\x13gce_pd_kms_key_name\x18\x01 \x01(\tB\x03\xe0\x41\x01"\x9f\x03\n\x10GceClusterConfig\x12\x15\n\x08zone_uri\x18\x01 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0bnetwork_uri\x18\x02 \x01(\tB\x03\xe0\x41\x01\x12\x1b\n\x0esubnetwork_uri\x18\x06 
\x01(\tB\x03\xe0\x41\x01\x12\x1d\n\x10internal_ip_only\x18\x07 \x01(\x08\x42\x03\xe0\x41\x01\x12\x1c\n\x0fservice_account\x18\x08 \x01(\tB\x03\xe0\x41\x01\x12#\n\x16service_account_scopes\x18\x03 \x03(\tB\x03\xe0\x41\x01\x12\x0c\n\x04tags\x18\x04 \x03(\t\x12J\n\x08metadata\x18\x05 \x03(\x0b\x32\x38.google.cloud.dataproc.v1.GceClusterConfig.MetadataEntry\x12P\n\x14reservation_affinity\x18\x0b \x01(\x0b\x32-.google.cloud.dataproc.v1.ReservationAffinityB\x03\xe0\x41\x01\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\xcd\x04\n\x13InstanceGroupConfig\x12\x1a\n\rnum_instances\x18\x01 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1b\n\x0einstance_names\x18\x02 \x03(\tB\x03\xe0\x41\x03\x12\x16\n\timage_uri\x18\x03 \x01(\tB\x03\xe0\x41\x01\x12\x1d\n\x10machine_type_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12>\n\x0b\x64isk_config\x18\x05 \x01(\x0b\x32$.google.cloud.dataproc.v1.DiskConfigB\x03\xe0\x41\x01\x12\x1b\n\x0eis_preemptible\x18\x06 \x01(\x08\x42\x03\xe0\x41\x03\x12Y\n\x0epreemptibility\x18\n \x01(\x0e\x32<.google.cloud.dataproc.v1.InstanceGroupConfig.PreemptibilityB\x03\xe0\x41\x01\x12O\n\x14managed_group_config\x18\x07 \x01(\x0b\x32,.google.cloud.dataproc.v1.ManagedGroupConfigB\x03\xe0\x41\x03\x12\x46\n\x0c\x61\x63\x63\x65lerators\x18\x08 \x03(\x0b\x32+.google.cloud.dataproc.v1.AcceleratorConfigB\x03\xe0\x41\x01\x12\x1d\n\x10min_cpu_platform\x18\t \x01(\tB\x03\xe0\x41\x01"V\n\x0ePreemptibility\x12\x1e\n\x1aPREEMPTIBILITY_UNSPECIFIED\x10\x00\x12\x13\n\x0fNON_PREEMPTIBLE\x10\x01\x12\x0f\n\x0bPREEMPTIBLE\x10\x02"c\n\x12ManagedGroupConfig\x12#\n\x16instance_template_name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12(\n\x1binstance_group_manager_name\x18\x02 \x01(\tB\x03\xe0\x41\x03"L\n\x11\x41\x63\x63\x65leratorConfig\x12\x1c\n\x14\x61\x63\x63\x65lerator_type_uri\x18\x01 \x01(\t\x12\x19\n\x11\x61\x63\x63\x65lerator_count\x18\x02 \x01(\x05"f\n\nDiskConfig\x12\x1b\n\x0e\x62oot_disk_type\x18\x03 \x01(\tB\x03\xe0\x41\x01\x12\x1e\n\x11\x62oot_disk_size_gb\x18\x01 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1b\n\x0enum_local_ssds\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01"s\n\x18NodeInitializationAction\x12\x1c\n\x0f\x65xecutable_file\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x39\n\x11\x65xecution_timeout\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x01"\x84\x03\n\rClusterStatus\x12\x41\n\x05state\x18\x01 \x01(\x0e\x32-.google.cloud.dataproc.v1.ClusterStatus.StateB\x03\xe0\x41\x03\x12\x16\n\x06\x64\x65tail\x18\x02 \x01(\tB\x06\xe0\x41\x03\xe0\x41\x01\x12\x39\n\x10state_start_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12G\n\x08substate\x18\x04 \x01(\x0e\x32\x30.google.cloud.dataproc.v1.ClusterStatus.SubstateB\x03\xe0\x41\x03"V\n\x05State\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0c\n\x08\x43REATING\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\t\n\x05\x45RROR\x10\x03\x12\x0c\n\x08\x44\x45LETING\x10\x04\x12\x0c\n\x08UPDATING\x10\x05"<\n\x08Substate\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\r\n\tUNHEALTHY\x10\x01\x12\x10\n\x0cSTALE_STATUS\x10\x02"S\n\x0eSecurityConfig\x12\x41\n\x0fkerberos_config\x18\x01 \x01(\x0b\x32(.google.cloud.dataproc.v1.KerberosConfig"\x90\x04\n\x0eKerberosConfig\x12\x1c\n\x0f\x65nable_kerberos\x18\x01 \x01(\x08\x42\x03\xe0\x41\x01\x12(\n\x1broot_principal_password_uri\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x18\n\x0bkms_key_uri\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0ckeystore_uri\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x1b\n\x0etruststore_uri\x18\x05 \x01(\tB\x03\xe0\x41\x01\x12"\n\x15keystore_password_uri\x18\x06 
\x01(\tB\x03\xe0\x41\x01\x12\x1d\n\x10key_password_uri\x18\x07 \x01(\tB\x03\xe0\x41\x01\x12$\n\x17truststore_password_uri\x18\x08 \x01(\tB\x03\xe0\x41\x01\x12$\n\x17\x63ross_realm_trust_realm\x18\t \x01(\tB\x03\xe0\x41\x01\x12"\n\x15\x63ross_realm_trust_kdc\x18\n \x01(\tB\x03\xe0\x41\x01\x12+\n\x1e\x63ross_realm_trust_admin_server\x18\x0b \x01(\tB\x03\xe0\x41\x01\x12\x32\n%cross_realm_trust_shared_password_uri\x18\x0c \x01(\tB\x03\xe0\x41\x01\x12\x1b\n\x0ekdc_db_key_uri\x18\r \x01(\tB\x03\xe0\x41\x01\x12\x1f\n\x12tgt_lifetime_hours\x18\x0e \x01(\x05\x42\x03\xe0\x41\x01\x12\x12\n\x05realm\x18\x0f \x01(\tB\x03\xe0\x41\x01"\xf9\x01\n\x0eSoftwareConfig\x12\x1a\n\rimage_version\x18\x01 \x01(\tB\x03\xe0\x41\x01\x12Q\n\nproperties\x18\x02 \x03(\x0b\x32\x38.google.cloud.dataproc.v1.SoftwareConfig.PropertiesEntryB\x03\xe0\x41\x01\x12\x45\n\x13optional_components\x18\x03 \x03(\x0e\x32#.google.cloud.dataproc.v1.ComponentB\x03\xe0\x41\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\x83\x02\n\x0fLifecycleConfig\x12\x37\n\x0fidle_delete_ttl\x18\x01 \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x01\x12;\n\x10\x61uto_delete_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x01H\x00\x12\x39\n\x0f\x61uto_delete_ttl\x18\x03 \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x01H\x00\x12\x38\n\x0fidle_start_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x42\x05\n\x03ttl"\x9a\x02\n\x0e\x43lusterMetrics\x12O\n\x0chdfs_metrics\x18\x01 \x03(\x0b\x32\x39.google.cloud.dataproc.v1.ClusterMetrics.HdfsMetricsEntry\x12O\n\x0cyarn_metrics\x18\x02 \x03(\x0b\x32\x39.google.cloud.dataproc.v1.ClusterMetrics.YarnMetricsEntry\x1a\x32\n\x10HdfsMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01\x1a\x32\n\x10YarnMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01"\x96\x01\n\x14\x43reateClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x37\n\x07\x63luster\x18\x02 \x01(\x0b\x32!.google.cloud.dataproc.v1.ClusterB\x03\xe0\x41\x02\x12\x17\n\nrequest_id\x18\x04 \x01(\tB\x03\xe0\x41\x01"\xae\x02\n\x14UpdateClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x05 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x37\n\x07\x63luster\x18\x03 \x01(\x0b\x32!.google.cloud.dataproc.v1.ClusterB\x03\xe0\x41\x02\x12\x45\n\x1dgraceful_decommission_timeout\x18\x06 \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x01\x12\x34\n\x0bupdate_mask\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02\x12\x17\n\nrequest_id\x18\x07 \x01(\tB\x03\xe0\x41\x01"\x93\x01\n\x14\x44\x65leteClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_uuid\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x17\n\nrequest_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\\\n\x11GetClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02"\x89\x01\n\x13ListClustersRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x04 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06\x66ilter\x18\x05 
\x01(\tB\x03\xe0\x41\x01\x12\x16\n\tpage_size\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x17\n\npage_token\x18\x03 \x01(\tB\x03\xe0\x41\x01"n\n\x14ListClustersResponse\x12\x38\n\x08\x63lusters\x18\x01 \x03(\x0b\x32!.google.cloud.dataproc.v1.ClusterB\x03\xe0\x41\x03\x12\x1c\n\x0fnext_page_token\x18\x02 \x01(\tB\x03\xe0\x41\x03"a\n\x16\x44iagnoseClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02"1\n\x16\x44iagnoseClusterResults\x12\x17\n\noutput_uri\x18\x01 \x01(\tB\x03\xe0\x41\x03"\xf8\x01\n\x13ReservationAffinity\x12Y\n\x18\x63onsume_reservation_type\x18\x01 \x01(\x0e\x32\x32.google.cloud.dataproc.v1.ReservationAffinity.TypeB\x03\xe0\x41\x01\x12\x10\n\x03key\x18\x02 \x01(\tB\x03\xe0\x41\x01\x12\x13\n\x06values\x18\x03 \x03(\tB\x03\xe0\x41\x01"_\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x12\n\x0eNO_RESERVATION\x10\x01\x12\x13\n\x0f\x41NY_RESERVATION\x10\x02\x12\x18\n\x14SPECIFIC_RESERVATION\x10\x03\x32\xff\x0c\n\x11\x43lusterController\x12\x80\x02\n\rCreateCluster\x12..google.cloud.dataproc.v1.CreateClusterRequest\x1a\x1d.google.longrunning.Operation"\x9f\x01\x82\xd3\xe4\x93\x02>"3/v1/projects/{project_id}/regions/{region}/clusters:\x07\x63luster\xda\x41\x19project_id,region,cluster\xca\x41<\n\x07\x43luster\x12\x31google.cloud.dataproc.v1.ClusterOperationMetadata\x12\xa8\x02\n\rUpdateCluster\x12..google.cloud.dataproc.v1.UpdateClusterRequest\x1a\x1d.google.longrunning.Operation"\xc7\x01\x82\xd3\xe4\x93\x02M2B/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:\x07\x63luster\xda\x41\x32project_id,region,cluster_name,cluster,update_mask\xca\x41<\n\x07\x43luster\x12\x31google.cloud.dataproc.v1.ClusterOperationMetadata\x12\x99\x02\n\rDeleteCluster\x12..google.cloud.dataproc.v1.DeleteClusterRequest\x1a\x1d.google.longrunning.Operation"\xb8\x01\x82\xd3\xe4\x93\x02\x44*B/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}\xda\x41\x1eproject_id,region,cluster_name\xca\x41J\n\x15google.protobuf.Empty\x12\x31google.cloud.dataproc.v1.ClusterOperationMetadata\x12\xc9\x01\n\nGetCluster\x12+.google.cloud.dataproc.v1.GetClusterRequest\x1a!.google.cloud.dataproc.v1.Cluster"k\x82\xd3\xe4\x93\x02\x44\x12\x42/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}\xda\x41\x1eproject_id,region,cluster_name\x12\xd9\x01\n\x0cListClusters\x12-.google.cloud.dataproc.v1.ListClustersRequest\x1a..google.cloud.dataproc.v1.ListClustersResponse"j\x82\xd3\xe4\x93\x02\x35\x12\x33/v1/projects/{project_id}/regions/{region}/clusters\xda\x41\x11project_id,region\xda\x41\x18project_id,region,filter\x12\xaa\x02\n\x0f\x44iagnoseCluster\x12\x30.google.cloud.dataproc.v1.DiagnoseClusterRequest\x1a\x1d.google.longrunning.Operation"\xc5\x01\x82\xd3\xe4\x93\x02P"K/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:diagnose:\x01*\xda\x41\x1eproject_id,region,cluster_name\xca\x41K\n\x16\x44iagnoseClusterResults\x12\x31google.cloud.dataproc.v1.ClusterOperationMetadata\x1aK\xca\x41\x17\x64\x61taproc.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformBq\n\x1c\x63om.google.cloud.dataproc.v1B\rClustersProtoP\x01Z@google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataprocb\x06proto3', - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_cloud_dot_dataproc__v1_dot_proto_dot_shared__pb2.DESCRIPTOR, - 
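# A sketch of the ClusterController surface whose method bindings are
# serialized above: CreateCluster is a long-running operation whose result is
# a Cluster (metadata: ClusterOperationMetadata). The regional endpoint,
# project, region, and machine types below are illustrative assumptions, and
# the cluster is passed as a dict mirroring the Cluster/ClusterConfig fields.
from google.cloud import dataproc_v1

cluster_client = dataproc_v1.ClusterControllerClient(
    client_options={"api_endpoint": "us-central1-dataproc.googleapis.com:443"}
)

cluster = {
    "project_id": "my-project",
    "cluster_name": "my-cluster",
    "config": {
        "master_config": {"num_instances": 1, "machine_type_uri": "n1-standard-4"},
        "worker_config": {"num_instances": 2, "machine_type_uri": "n1-standard-4"},
    },
}

operation = cluster_client.create_cluster(
    project_id="my-project", region="us-central1", cluster=cluster
)
result = operation.result()  # blocks until the cluster is provisioned or the LRO errors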
google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, - google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, - google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - ], -) - - -_INSTANCEGROUPCONFIG_PREEMPTIBILITY = _descriptor.EnumDescriptor( - name="Preemptibility", - full_name="google.cloud.dataproc.v1.InstanceGroupConfig.Preemptibility", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="PREEMPTIBILITY_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="NON_PREEMPTIBLE", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="PREEMPTIBLE", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=2936, - serialized_end=3022, -) -_sym_db.RegisterEnumDescriptor(_INSTANCEGROUPCONFIG_PREEMPTIBILITY) - -_CLUSTERSTATUS_STATE = _descriptor.EnumDescriptor( - name="State", - full_name="google.cloud.dataproc.v1.ClusterStatus.State", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="UNKNOWN", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="CREATING", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="RUNNING", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="ERROR", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="DELETING", - index=4, - number=4, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="UPDATING", - index=5, - number=5, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=3665, - serialized_end=3751, -) -_sym_db.RegisterEnumDescriptor(_CLUSTERSTATUS_STATE) - -_CLUSTERSTATUS_SUBSTATE = _descriptor.EnumDescriptor( - name="Substate", - full_name="google.cloud.dataproc.v1.ClusterStatus.Substate", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="UNHEALTHY", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="STALE_STATUS", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=3753, - serialized_end=3813, -) -_sym_db.RegisterEnumDescriptor(_CLUSTERSTATUS_SUBSTATE) - -_RESERVATIONAFFINITY_TYPE = 
_descriptor.EnumDescriptor( - name="Type", - full_name="google.cloud.dataproc.v1.ReservationAffinity.Type", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="TYPE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="NO_RESERVATION", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="ANY_RESERVATION", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="SPECIFIC_RESERVATION", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=6488, - serialized_end=6583, -) -_sym_db.RegisterEnumDescriptor(_RESERVATIONAFFINITY_TYPE) - - -_CLUSTER_LABELSENTRY = _descriptor.Descriptor( - name="LabelsEntry", - full_name="google.cloud.dataproc.v1.Cluster.LabelsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1.Cluster.LabelsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1.Cluster.LabelsEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=761, - serialized_end=806, -) - -_CLUSTER = _descriptor.Descriptor( - name="Cluster", - full_name="google.cloud.dataproc.v1.Cluster", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="project_id", - full_name="google.cloud.dataproc.v1.Cluster.project_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_name", - full_name="google.cloud.dataproc.v1.Cluster.cluster_name", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - 
_descriptor.FieldDescriptor( - name="config", - full_name="google.cloud.dataproc.v1.Cluster.config", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="labels", - full_name="google.cloud.dataproc.v1.Cluster.labels", - index=3, - number=8, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="status", - full_name="google.cloud.dataproc.v1.Cluster.status", - index=4, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="status_history", - full_name="google.cloud.dataproc.v1.Cluster.status_history", - index=5, - number=7, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_uuid", - full_name="google.cloud.dataproc.v1.Cluster.cluster_uuid", - index=6, - number=6, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="metrics", - full_name="google.cloud.dataproc.v1.Cluster.metrics", - index=7, - number=9, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_CLUSTER_LABELSENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=345, - serialized_end=806, -) - - -_CLUSTERCONFIG = _descriptor.Descriptor( - name="ClusterConfig", - full_name="google.cloud.dataproc.v1.ClusterConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="config_bucket", - full_name="google.cloud.dataproc.v1.ClusterConfig.config_bucket", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - 
_descriptor.FieldDescriptor( - name="temp_bucket", - full_name="google.cloud.dataproc.v1.ClusterConfig.temp_bucket", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="gce_cluster_config", - full_name="google.cloud.dataproc.v1.ClusterConfig.gce_cluster_config", - index=2, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="master_config", - full_name="google.cloud.dataproc.v1.ClusterConfig.master_config", - index=3, - number=9, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="worker_config", - full_name="google.cloud.dataproc.v1.ClusterConfig.worker_config", - index=4, - number=10, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="secondary_worker_config", - full_name="google.cloud.dataproc.v1.ClusterConfig.secondary_worker_config", - index=5, - number=12, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="software_config", - full_name="google.cloud.dataproc.v1.ClusterConfig.software_config", - index=6, - number=13, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="initialization_actions", - full_name="google.cloud.dataproc.v1.ClusterConfig.initialization_actions", - index=7, - number=11, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="encryption_config", - full_name="google.cloud.dataproc.v1.ClusterConfig.encryption_config", - index=8, - number=15, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="autoscaling_config", - full_name="google.cloud.dataproc.v1.ClusterConfig.autoscaling_config", - index=9, - number=18, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="security_config", - full_name="google.cloud.dataproc.v1.ClusterConfig.security_config", - index=10, - number=16, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="lifecycle_config", - full_name="google.cloud.dataproc.v1.ClusterConfig.lifecycle_config", - index=11, - number=17, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="endpoint_config", - full_name="google.cloud.dataproc.v1.ClusterConfig.endpoint_config", - index=12, - number=19, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=809, - serialized_end=1723, -) - - -_ENDPOINTCONFIG_HTTPPORTSENTRY = _descriptor.Descriptor( - name="HttpPortsEntry", - full_name="google.cloud.dataproc.v1.EndpointConfig.HttpPortsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1.EndpointConfig.HttpPortsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1.EndpointConfig.HttpPortsEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1864, - serialized_end=1912, -) - -_ENDPOINTCONFIG = 
_descriptor.Descriptor( - name="EndpointConfig", - full_name="google.cloud.dataproc.v1.EndpointConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="http_ports", - full_name="google.cloud.dataproc.v1.EndpointConfig.http_ports", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="enable_http_port_access", - full_name="google.cloud.dataproc.v1.EndpointConfig.enable_http_port_access", - index=1, - number=2, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_ENDPOINTCONFIG_HTTPPORTSENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1726, - serialized_end=1912, -) - - -_AUTOSCALINGCONFIG = _descriptor.Descriptor( - name="AutoscalingConfig", - full_name="google.cloud.dataproc.v1.AutoscalingConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="policy_uri", - full_name="google.cloud.dataproc.v1.AutoscalingConfig.policy_uri", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1914, - serialized_end=1958, -) - - -_ENCRYPTIONCONFIG = _descriptor.Descriptor( - name="EncryptionConfig", - full_name="google.cloud.dataproc.v1.EncryptionConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="gce_pd_kms_key_name", - full_name="google.cloud.dataproc.v1.EncryptionConfig.gce_pd_kms_key_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1960, - serialized_end=2012, -) - - -_GCECLUSTERCONFIG_METADATAENTRY = _descriptor.Descriptor( - name="MetadataEntry", - full_name="google.cloud.dataproc.v1.GceClusterConfig.MetadataEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - 
name="key", - full_name="google.cloud.dataproc.v1.GceClusterConfig.MetadataEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1.GceClusterConfig.MetadataEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2383, - serialized_end=2430, -) - -_GCECLUSTERCONFIG = _descriptor.Descriptor( - name="GceClusterConfig", - full_name="google.cloud.dataproc.v1.GceClusterConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="zone_uri", - full_name="google.cloud.dataproc.v1.GceClusterConfig.zone_uri", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="network_uri", - full_name="google.cloud.dataproc.v1.GceClusterConfig.network_uri", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="subnetwork_uri", - full_name="google.cloud.dataproc.v1.GceClusterConfig.subnetwork_uri", - index=2, - number=6, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="internal_ip_only", - full_name="google.cloud.dataproc.v1.GceClusterConfig.internal_ip_only", - index=3, - number=7, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="service_account", - full_name="google.cloud.dataproc.v1.GceClusterConfig.service_account", - index=4, - number=8, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="service_account_scopes", - full_name="google.cloud.dataproc.v1.GceClusterConfig.service_account_scopes", - index=5, - number=3, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="tags", - full_name="google.cloud.dataproc.v1.GceClusterConfig.tags", - index=6, - number=4, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="metadata", - full_name="google.cloud.dataproc.v1.GceClusterConfig.metadata", - index=7, - number=5, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="reservation_affinity", - full_name="google.cloud.dataproc.v1.GceClusterConfig.reservation_affinity", - index=8, - number=11, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_GCECLUSTERCONFIG_METADATAENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2015, - serialized_end=2430, -) - - -_INSTANCEGROUPCONFIG = _descriptor.Descriptor( - name="InstanceGroupConfig", - full_name="google.cloud.dataproc.v1.InstanceGroupConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="num_instances", - full_name="google.cloud.dataproc.v1.InstanceGroupConfig.num_instances", - index=0, - number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="instance_names", - full_name="google.cloud.dataproc.v1.InstanceGroupConfig.instance_names", - index=1, - number=2, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="image_uri", - full_name="google.cloud.dataproc.v1.InstanceGroupConfig.image_uri", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - 
enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="machine_type_uri", - full_name="google.cloud.dataproc.v1.InstanceGroupConfig.machine_type_uri", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="disk_config", - full_name="google.cloud.dataproc.v1.InstanceGroupConfig.disk_config", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="is_preemptible", - full_name="google.cloud.dataproc.v1.InstanceGroupConfig.is_preemptible", - index=5, - number=6, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="preemptibility", - full_name="google.cloud.dataproc.v1.InstanceGroupConfig.preemptibility", - index=6, - number=10, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="managed_group_config", - full_name="google.cloud.dataproc.v1.InstanceGroupConfig.managed_group_config", - index=7, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="accelerators", - full_name="google.cloud.dataproc.v1.InstanceGroupConfig.accelerators", - index=8, - number=8, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="min_cpu_platform", - full_name="google.cloud.dataproc.v1.InstanceGroupConfig.min_cpu_platform", - index=9, - number=9, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_INSTANCEGROUPCONFIG_PREEMPTIBILITY,], - serialized_options=None, - is_extendable=False, - 
syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2433, - serialized_end=3022, -) - - -_MANAGEDGROUPCONFIG = _descriptor.Descriptor( - name="ManagedGroupConfig", - full_name="google.cloud.dataproc.v1.ManagedGroupConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="instance_template_name", - full_name="google.cloud.dataproc.v1.ManagedGroupConfig.instance_template_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="instance_group_manager_name", - full_name="google.cloud.dataproc.v1.ManagedGroupConfig.instance_group_manager_name", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3024, - serialized_end=3123, -) - - -_ACCELERATORCONFIG = _descriptor.Descriptor( - name="AcceleratorConfig", - full_name="google.cloud.dataproc.v1.AcceleratorConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="accelerator_type_uri", - full_name="google.cloud.dataproc.v1.AcceleratorConfig.accelerator_type_uri", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="accelerator_count", - full_name="google.cloud.dataproc.v1.AcceleratorConfig.accelerator_count", - index=1, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3125, - serialized_end=3201, -) - - -_DISKCONFIG = _descriptor.Descriptor( - name="DiskConfig", - full_name="google.cloud.dataproc.v1.DiskConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="boot_disk_type", - full_name="google.cloud.dataproc.v1.DiskConfig.boot_disk_type", - index=0, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="boot_disk_size_gb", - full_name="google.cloud.dataproc.v1.DiskConfig.boot_disk_size_gb", - index=1, - number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="num_local_ssds", - full_name="google.cloud.dataproc.v1.DiskConfig.num_local_ssds", - index=2, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3203, - serialized_end=3305, -) - - -_NODEINITIALIZATIONACTION = _descriptor.Descriptor( - name="NodeInitializationAction", - full_name="google.cloud.dataproc.v1.NodeInitializationAction", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="executable_file", - full_name="google.cloud.dataproc.v1.NodeInitializationAction.executable_file", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="execution_timeout", - full_name="google.cloud.dataproc.v1.NodeInitializationAction.execution_timeout", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3307, - serialized_end=3422, -) - - -_CLUSTERSTATUS = _descriptor.Descriptor( - name="ClusterStatus", - full_name="google.cloud.dataproc.v1.ClusterStatus", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="state", - full_name="google.cloud.dataproc.v1.ClusterStatus.state", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="detail", - full_name="google.cloud.dataproc.v1.ClusterStatus.detail", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - 
message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="state_start_time", - full_name="google.cloud.dataproc.v1.ClusterStatus.state_start_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="substate", - full_name="google.cloud.dataproc.v1.ClusterStatus.substate", - index=3, - number=4, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_CLUSTERSTATUS_STATE, _CLUSTERSTATUS_SUBSTATE,], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3425, - serialized_end=3813, -) - - -_SECURITYCONFIG = _descriptor.Descriptor( - name="SecurityConfig", - full_name="google.cloud.dataproc.v1.SecurityConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="kerberos_config", - full_name="google.cloud.dataproc.v1.SecurityConfig.kerberos_config", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3815, - serialized_end=3898, -) - - -_KERBEROSCONFIG = _descriptor.Descriptor( - name="KerberosConfig", - full_name="google.cloud.dataproc.v1.KerberosConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="enable_kerberos", - full_name="google.cloud.dataproc.v1.KerberosConfig.enable_kerberos", - index=0, - number=1, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="root_principal_password_uri", - full_name="google.cloud.dataproc.v1.KerberosConfig.root_principal_password_uri", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="kms_key_uri", - 
full_name="google.cloud.dataproc.v1.KerberosConfig.kms_key_uri", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="keystore_uri", - full_name="google.cloud.dataproc.v1.KerberosConfig.keystore_uri", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="truststore_uri", - full_name="google.cloud.dataproc.v1.KerberosConfig.truststore_uri", - index=4, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="keystore_password_uri", - full_name="google.cloud.dataproc.v1.KerberosConfig.keystore_password_uri", - index=5, - number=6, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="key_password_uri", - full_name="google.cloud.dataproc.v1.KerberosConfig.key_password_uri", - index=6, - number=7, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="truststore_password_uri", - full_name="google.cloud.dataproc.v1.KerberosConfig.truststore_password_uri", - index=7, - number=8, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cross_realm_trust_realm", - full_name="google.cloud.dataproc.v1.KerberosConfig.cross_realm_trust_realm", - index=8, - number=9, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cross_realm_trust_kdc", - full_name="google.cloud.dataproc.v1.KerberosConfig.cross_realm_trust_kdc", - index=9, - number=10, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - 
containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cross_realm_trust_admin_server", - full_name="google.cloud.dataproc.v1.KerberosConfig.cross_realm_trust_admin_server", - index=10, - number=11, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cross_realm_trust_shared_password_uri", - full_name="google.cloud.dataproc.v1.KerberosConfig.cross_realm_trust_shared_password_uri", - index=11, - number=12, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="kdc_db_key_uri", - full_name="google.cloud.dataproc.v1.KerberosConfig.kdc_db_key_uri", - index=12, - number=13, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="tgt_lifetime_hours", - full_name="google.cloud.dataproc.v1.KerberosConfig.tgt_lifetime_hours", - index=13, - number=14, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="realm", - full_name="google.cloud.dataproc.v1.KerberosConfig.realm", - index=14, - number=15, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3901, - serialized_end=4429, -) - - -_SOFTWARECONFIG_PROPERTIESENTRY = _descriptor.Descriptor( - name="PropertiesEntry", - full_name="google.cloud.dataproc.v1.SoftwareConfig.PropertiesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1.SoftwareConfig.PropertiesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - 
full_name="google.cloud.dataproc.v1.SoftwareConfig.PropertiesEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4632, - serialized_end=4681, -) - -_SOFTWARECONFIG = _descriptor.Descriptor( - name="SoftwareConfig", - full_name="google.cloud.dataproc.v1.SoftwareConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="image_version", - full_name="google.cloud.dataproc.v1.SoftwareConfig.image_version", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="properties", - full_name="google.cloud.dataproc.v1.SoftwareConfig.properties", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="optional_components", - full_name="google.cloud.dataproc.v1.SoftwareConfig.optional_components", - index=2, - number=3, - type=14, - cpp_type=8, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_SOFTWARECONFIG_PROPERTIESENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4432, - serialized_end=4681, -) - - -_LIFECYCLECONFIG = _descriptor.Descriptor( - name="LifecycleConfig", - full_name="google.cloud.dataproc.v1.LifecycleConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="idle_delete_ttl", - full_name="google.cloud.dataproc.v1.LifecycleConfig.idle_delete_ttl", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="auto_delete_time", - full_name="google.cloud.dataproc.v1.LifecycleConfig.auto_delete_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", 
- file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="auto_delete_ttl", - full_name="google.cloud.dataproc.v1.LifecycleConfig.auto_delete_ttl", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="idle_start_time", - full_name="google.cloud.dataproc.v1.LifecycleConfig.idle_start_time", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="ttl", - full_name="google.cloud.dataproc.v1.LifecycleConfig.ttl", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=4684, - serialized_end=4943, -) - - -_CLUSTERMETRICS_HDFSMETRICSENTRY = _descriptor.Descriptor( - name="HdfsMetricsEntry", - full_name="google.cloud.dataproc.v1.ClusterMetrics.HdfsMetricsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1.ClusterMetrics.HdfsMetricsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1.ClusterMetrics.HdfsMetricsEntry.value", - index=1, - number=2, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=5126, - serialized_end=5176, -) - -_CLUSTERMETRICS_YARNMETRICSENTRY = _descriptor.Descriptor( - name="YarnMetricsEntry", - full_name="google.cloud.dataproc.v1.ClusterMetrics.YarnMetricsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1.ClusterMetrics.YarnMetricsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - 
name="value", - full_name="google.cloud.dataproc.v1.ClusterMetrics.YarnMetricsEntry.value", - index=1, - number=2, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=5178, - serialized_end=5228, -) - -_CLUSTERMETRICS = _descriptor.Descriptor( - name="ClusterMetrics", - full_name="google.cloud.dataproc.v1.ClusterMetrics", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="hdfs_metrics", - full_name="google.cloud.dataproc.v1.ClusterMetrics.hdfs_metrics", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="yarn_metrics", - full_name="google.cloud.dataproc.v1.ClusterMetrics.yarn_metrics", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_CLUSTERMETRICS_HDFSMETRICSENTRY, _CLUSTERMETRICS_YARNMETRICSENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4946, - serialized_end=5228, -) - - -_CREATECLUSTERREQUEST = _descriptor.Descriptor( - name="CreateClusterRequest", - full_name="google.cloud.dataproc.v1.CreateClusterRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="project_id", - full_name="google.cloud.dataproc.v1.CreateClusterRequest.project_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="region", - full_name="google.cloud.dataproc.v1.CreateClusterRequest.region", - index=1, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster", - full_name="google.cloud.dataproc.v1.CreateClusterRequest.cluster", - index=2, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="request_id", - full_name="google.cloud.dataproc.v1.CreateClusterRequest.request_id", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=5231, - serialized_end=5381, -) - - -_UPDATECLUSTERREQUEST = _descriptor.Descriptor( - name="UpdateClusterRequest", - full_name="google.cloud.dataproc.v1.UpdateClusterRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="project_id", - full_name="google.cloud.dataproc.v1.UpdateClusterRequest.project_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="region", - full_name="google.cloud.dataproc.v1.UpdateClusterRequest.region", - index=1, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_name", - full_name="google.cloud.dataproc.v1.UpdateClusterRequest.cluster_name", - index=2, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster", - full_name="google.cloud.dataproc.v1.UpdateClusterRequest.cluster", - index=3, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="graceful_decommission_timeout", - full_name="google.cloud.dataproc.v1.UpdateClusterRequest.graceful_decommission_timeout", - index=4, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="update_mask", - full_name="google.cloud.dataproc.v1.UpdateClusterRequest.update_mask", - index=5, - number=4, - type=11, - cpp_type=10, - label=1, - 
has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="request_id", - full_name="google.cloud.dataproc.v1.UpdateClusterRequest.request_id", - index=6, - number=7, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=5384, - serialized_end=5686, -) - - -_DELETECLUSTERREQUEST = _descriptor.Descriptor( - name="DeleteClusterRequest", - full_name="google.cloud.dataproc.v1.DeleteClusterRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="project_id", - full_name="google.cloud.dataproc.v1.DeleteClusterRequest.project_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="region", - full_name="google.cloud.dataproc.v1.DeleteClusterRequest.region", - index=1, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_name", - full_name="google.cloud.dataproc.v1.DeleteClusterRequest.cluster_name", - index=2, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_uuid", - full_name="google.cloud.dataproc.v1.DeleteClusterRequest.cluster_uuid", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="request_id", - full_name="google.cloud.dataproc.v1.DeleteClusterRequest.request_id", - index=4, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - 
enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=5689, - serialized_end=5836, -) - - -_GETCLUSTERREQUEST = _descriptor.Descriptor( - name="GetClusterRequest", - full_name="google.cloud.dataproc.v1.GetClusterRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="project_id", - full_name="google.cloud.dataproc.v1.GetClusterRequest.project_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="region", - full_name="google.cloud.dataproc.v1.GetClusterRequest.region", - index=1, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_name", - full_name="google.cloud.dataproc.v1.GetClusterRequest.cluster_name", - index=2, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=5838, - serialized_end=5930, -) - - -_LISTCLUSTERSREQUEST = _descriptor.Descriptor( - name="ListClustersRequest", - full_name="google.cloud.dataproc.v1.ListClustersRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="project_id", - full_name="google.cloud.dataproc.v1.ListClustersRequest.project_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="region", - full_name="google.cloud.dataproc.v1.ListClustersRequest.region", - index=1, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.cloud.dataproc.v1.ListClustersRequest.filter", - index=2, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.cloud.dataproc.v1.ListClustersRequest.page_size", - index=3, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.cloud.dataproc.v1.ListClustersRequest.page_token", - index=4, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=5933, - serialized_end=6070, -) - - -_LISTCLUSTERSRESPONSE = _descriptor.Descriptor( - name="ListClustersResponse", - full_name="google.cloud.dataproc.v1.ListClustersResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="clusters", - full_name="google.cloud.dataproc.v1.ListClustersResponse.clusters", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.cloud.dataproc.v1.ListClustersResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=6072, - serialized_end=6182, -) - - -_DIAGNOSECLUSTERREQUEST = _descriptor.Descriptor( - name="DiagnoseClusterRequest", - full_name="google.cloud.dataproc.v1.DiagnoseClusterRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="project_id", - full_name="google.cloud.dataproc.v1.DiagnoseClusterRequest.project_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="region", - full_name="google.cloud.dataproc.v1.DiagnoseClusterRequest.region", - index=1, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - 
default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_name", - full_name="google.cloud.dataproc.v1.DiagnoseClusterRequest.cluster_name", - index=2, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=6184, - serialized_end=6281, -) - - -_DIAGNOSECLUSTERRESULTS = _descriptor.Descriptor( - name="DiagnoseClusterResults", - full_name="google.cloud.dataproc.v1.DiagnoseClusterResults", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="output_uri", - full_name="google.cloud.dataproc.v1.DiagnoseClusterResults.output_uri", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=6283, - serialized_end=6332, -) - - -_RESERVATIONAFFINITY = _descriptor.Descriptor( - name="ReservationAffinity", - full_name="google.cloud.dataproc.v1.ReservationAffinity", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="consume_reservation_type", - full_name="google.cloud.dataproc.v1.ReservationAffinity.consume_reservation_type", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1.ReservationAffinity.key", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="values", - full_name="google.cloud.dataproc.v1.ReservationAffinity.values", - index=2, - number=3, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - 
enum_types=[_RESERVATIONAFFINITY_TYPE,], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=6335, - serialized_end=6583, -) - -_CLUSTER_LABELSENTRY.containing_type = _CLUSTER -_CLUSTER.fields_by_name["config"].message_type = _CLUSTERCONFIG -_CLUSTER.fields_by_name["labels"].message_type = _CLUSTER_LABELSENTRY -_CLUSTER.fields_by_name["status"].message_type = _CLUSTERSTATUS -_CLUSTER.fields_by_name["status_history"].message_type = _CLUSTERSTATUS -_CLUSTER.fields_by_name["metrics"].message_type = _CLUSTERMETRICS -_CLUSTERCONFIG.fields_by_name["gce_cluster_config"].message_type = _GCECLUSTERCONFIG -_CLUSTERCONFIG.fields_by_name["master_config"].message_type = _INSTANCEGROUPCONFIG -_CLUSTERCONFIG.fields_by_name["worker_config"].message_type = _INSTANCEGROUPCONFIG -_CLUSTERCONFIG.fields_by_name[ - "secondary_worker_config" -].message_type = _INSTANCEGROUPCONFIG -_CLUSTERCONFIG.fields_by_name["software_config"].message_type = _SOFTWARECONFIG -_CLUSTERCONFIG.fields_by_name[ - "initialization_actions" -].message_type = _NODEINITIALIZATIONACTION -_CLUSTERCONFIG.fields_by_name["encryption_config"].message_type = _ENCRYPTIONCONFIG -_CLUSTERCONFIG.fields_by_name["autoscaling_config"].message_type = _AUTOSCALINGCONFIG -_CLUSTERCONFIG.fields_by_name["security_config"].message_type = _SECURITYCONFIG -_CLUSTERCONFIG.fields_by_name["lifecycle_config"].message_type = _LIFECYCLECONFIG -_CLUSTERCONFIG.fields_by_name["endpoint_config"].message_type = _ENDPOINTCONFIG -_ENDPOINTCONFIG_HTTPPORTSENTRY.containing_type = _ENDPOINTCONFIG -_ENDPOINTCONFIG.fields_by_name[ - "http_ports" -].message_type = _ENDPOINTCONFIG_HTTPPORTSENTRY -_GCECLUSTERCONFIG_METADATAENTRY.containing_type = _GCECLUSTERCONFIG -_GCECLUSTERCONFIG.fields_by_name[ - "metadata" -].message_type = _GCECLUSTERCONFIG_METADATAENTRY -_GCECLUSTERCONFIG.fields_by_name[ - "reservation_affinity" -].message_type = _RESERVATIONAFFINITY -_INSTANCEGROUPCONFIG.fields_by_name["disk_config"].message_type = _DISKCONFIG -_INSTANCEGROUPCONFIG.fields_by_name[ - "preemptibility" -].enum_type = _INSTANCEGROUPCONFIG_PREEMPTIBILITY -_INSTANCEGROUPCONFIG.fields_by_name[ - "managed_group_config" -].message_type = _MANAGEDGROUPCONFIG -_INSTANCEGROUPCONFIG.fields_by_name["accelerators"].message_type = _ACCELERATORCONFIG -_INSTANCEGROUPCONFIG_PREEMPTIBILITY.containing_type = _INSTANCEGROUPCONFIG -_NODEINITIALIZATIONACTION.fields_by_name[ - "execution_timeout" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_CLUSTERSTATUS.fields_by_name["state"].enum_type = _CLUSTERSTATUS_STATE -_CLUSTERSTATUS.fields_by_name[ - "state_start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CLUSTERSTATUS.fields_by_name["substate"].enum_type = _CLUSTERSTATUS_SUBSTATE -_CLUSTERSTATUS_STATE.containing_type = _CLUSTERSTATUS -_CLUSTERSTATUS_SUBSTATE.containing_type = _CLUSTERSTATUS -_SECURITYCONFIG.fields_by_name["kerberos_config"].message_type = _KERBEROSCONFIG -_SOFTWARECONFIG_PROPERTIESENTRY.containing_type = _SOFTWARECONFIG -_SOFTWARECONFIG.fields_by_name[ - "properties" -].message_type = _SOFTWARECONFIG_PROPERTIESENTRY -_SOFTWARECONFIG.fields_by_name[ - "optional_components" -].enum_type = google_dot_cloud_dot_dataproc__v1_dot_proto_dot_shared__pb2._COMPONENT -_LIFECYCLECONFIG.fields_by_name[ - "idle_delete_ttl" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_LIFECYCLECONFIG.fields_by_name[ - "auto_delete_time" -].message_type = 
google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_LIFECYCLECONFIG.fields_by_name[ - "auto_delete_ttl" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_LIFECYCLECONFIG.fields_by_name[ - "idle_start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_LIFECYCLECONFIG.oneofs_by_name["ttl"].fields.append( - _LIFECYCLECONFIG.fields_by_name["auto_delete_time"] -) -_LIFECYCLECONFIG.fields_by_name[ - "auto_delete_time" -].containing_oneof = _LIFECYCLECONFIG.oneofs_by_name["ttl"] -_LIFECYCLECONFIG.oneofs_by_name["ttl"].fields.append( - _LIFECYCLECONFIG.fields_by_name["auto_delete_ttl"] -) -_LIFECYCLECONFIG.fields_by_name[ - "auto_delete_ttl" -].containing_oneof = _LIFECYCLECONFIG.oneofs_by_name["ttl"] -_CLUSTERMETRICS_HDFSMETRICSENTRY.containing_type = _CLUSTERMETRICS -_CLUSTERMETRICS_YARNMETRICSENTRY.containing_type = _CLUSTERMETRICS -_CLUSTERMETRICS.fields_by_name[ - "hdfs_metrics" -].message_type = _CLUSTERMETRICS_HDFSMETRICSENTRY -_CLUSTERMETRICS.fields_by_name[ - "yarn_metrics" -].message_type = _CLUSTERMETRICS_YARNMETRICSENTRY -_CREATECLUSTERREQUEST.fields_by_name["cluster"].message_type = _CLUSTER -_UPDATECLUSTERREQUEST.fields_by_name["cluster"].message_type = _CLUSTER -_UPDATECLUSTERREQUEST.fields_by_name[ - "graceful_decommission_timeout" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_UPDATECLUSTERREQUEST.fields_by_name[ - "update_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_LISTCLUSTERSRESPONSE.fields_by_name["clusters"].message_type = _CLUSTER -_RESERVATIONAFFINITY.fields_by_name[ - "consume_reservation_type" -].enum_type = _RESERVATIONAFFINITY_TYPE -_RESERVATIONAFFINITY_TYPE.containing_type = _RESERVATIONAFFINITY -DESCRIPTOR.message_types_by_name["Cluster"] = _CLUSTER -DESCRIPTOR.message_types_by_name["ClusterConfig"] = _CLUSTERCONFIG -DESCRIPTOR.message_types_by_name["EndpointConfig"] = _ENDPOINTCONFIG -DESCRIPTOR.message_types_by_name["AutoscalingConfig"] = _AUTOSCALINGCONFIG -DESCRIPTOR.message_types_by_name["EncryptionConfig"] = _ENCRYPTIONCONFIG -DESCRIPTOR.message_types_by_name["GceClusterConfig"] = _GCECLUSTERCONFIG -DESCRIPTOR.message_types_by_name["InstanceGroupConfig"] = _INSTANCEGROUPCONFIG -DESCRIPTOR.message_types_by_name["ManagedGroupConfig"] = _MANAGEDGROUPCONFIG -DESCRIPTOR.message_types_by_name["AcceleratorConfig"] = _ACCELERATORCONFIG -DESCRIPTOR.message_types_by_name["DiskConfig"] = _DISKCONFIG -DESCRIPTOR.message_types_by_name["NodeInitializationAction"] = _NODEINITIALIZATIONACTION -DESCRIPTOR.message_types_by_name["ClusterStatus"] = _CLUSTERSTATUS -DESCRIPTOR.message_types_by_name["SecurityConfig"] = _SECURITYCONFIG -DESCRIPTOR.message_types_by_name["KerberosConfig"] = _KERBEROSCONFIG -DESCRIPTOR.message_types_by_name["SoftwareConfig"] = _SOFTWARECONFIG -DESCRIPTOR.message_types_by_name["LifecycleConfig"] = _LIFECYCLECONFIG -DESCRIPTOR.message_types_by_name["ClusterMetrics"] = _CLUSTERMETRICS -DESCRIPTOR.message_types_by_name["CreateClusterRequest"] = _CREATECLUSTERREQUEST -DESCRIPTOR.message_types_by_name["UpdateClusterRequest"] = _UPDATECLUSTERREQUEST -DESCRIPTOR.message_types_by_name["DeleteClusterRequest"] = _DELETECLUSTERREQUEST -DESCRIPTOR.message_types_by_name["GetClusterRequest"] = _GETCLUSTERREQUEST -DESCRIPTOR.message_types_by_name["ListClustersRequest"] = _LISTCLUSTERSREQUEST -DESCRIPTOR.message_types_by_name["ListClustersResponse"] = _LISTCLUSTERSRESPONSE -DESCRIPTOR.message_types_by_name["DiagnoseClusterRequest"] = _DIAGNOSECLUSTERREQUEST 
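The hand-wired descriptor registrations being deleted here are superseded by the proto-plus message classes this change adds under google/cloud/dataproc_v1/types/ (see clusters.py in the diffstat). As a rough, illustrative sketch of that style only — field numbers copied from the deleted descriptors above, enum values abbreviated, and not the literal contents of the generated file::

    import proto  # proto-plus
    from google.protobuf import timestamp_pb2

    class ClusterStatus(proto.Message):
        """Cluster status expressed as a proto-plus message instead of raw descriptors."""

        class State(proto.Enum):
            # Abbreviated; the generated enum carries the full set of states.
            UNKNOWN = 0
            CREATING = 1
            RUNNING = 2
            ERROR = 3

        state = proto.Field(proto.ENUM, number=1, enum=State)
        detail = proto.Field(proto.STRING, number=2)
        state_start_time = proto.Field(
            proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp
        )
        # substate (field number 4) omitted in this sketch.

Callers then construct these types through google.cloud.dataproc_v1 (for example dataproc_v1.ClusterStatus) rather than importing the *_pb2 modules removed in this hunk.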
-DESCRIPTOR.message_types_by_name["DiagnoseClusterResults"] = _DIAGNOSECLUSTERRESULTS -DESCRIPTOR.message_types_by_name["ReservationAffinity"] = _RESERVATIONAFFINITY -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Cluster = _reflection.GeneratedProtocolMessageType( - "Cluster", - (_message.Message,), - { - "LabelsEntry": _reflection.GeneratedProtocolMessageType( - "LabelsEntry", - (_message.Message,), - { - "DESCRIPTOR": _CLUSTER_LABELSENTRY, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.Cluster.LabelsEntry) - }, - ), - "DESCRIPTOR": _CLUSTER, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2", - "__doc__": """Describes the identifying information, config, and status of a cluster - of Compute Engine instances. - - Attributes: - project_id: - Required. The Google Cloud Platform project ID that the - cluster belongs to. - cluster_name: - Required. The cluster name. Cluster names within a project - must be unique. Names of deleted clusters can be reused. - config: - Required. The cluster config. Note that Dataproc may set - default values, and values may change when clusters are - updated. - labels: - Optional. The labels to associate with this cluster. Label - **keys** must contain 1 to 63 characters, and must conform to - `RFC 1035 `__. Label - **values** may be empty, but, if present, must contain 1 to 63 - characters, and must conform to `RFC 1035 - `__. No more than 32 - labels can be associated with a cluster. - status: - Output only. Cluster status. - status_history: - Output only. The previous cluster status. - cluster_uuid: - Output only. A cluster UUID (Unique Universal Identifier). - Dataproc generates this value when it creates the cluster. - metrics: - Output only. Contains cluster daemon metrics such as HDFS and - YARN stats. **Beta Feature**: This report is available for - testing purposes only. It may be changed before final release. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.Cluster) - }, -) -_sym_db.RegisterMessage(Cluster) -_sym_db.RegisterMessage(Cluster.LabelsEntry) - -ClusterConfig = _reflection.GeneratedProtocolMessageType( - "ClusterConfig", - (_message.Message,), - { - "DESCRIPTOR": _CLUSTERCONFIG, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2", - "__doc__": """The cluster config. - - Attributes: - config_bucket: - Optional. A Cloud Storage bucket used to stage job - dependencies, config files, and job driver console output. If - you do not specify a staging bucket, Cloud Dataproc will - determine a Cloud Storage location (US, ASIA, or EU) for your - cluster’s staging bucket according to the Compute Engine zone - where your cluster is deployed, and then create and manage - this project-level, per-location bucket (see `Dataproc staging - bucket - `__). - temp_bucket: - Optional. A Cloud Storage bucket used to store ephemeral - cluster and jobs data, such as Spark and MapReduce history - files. If you do not specify a temp bucket, Dataproc will - determine a Cloud Storage location (US, ASIA, or EU) for your - cluster’s temp bucket according to the Compute Engine zone - where your cluster is deployed, and then create and manage - this project-level, per-location bucket. The default bucket - has a TTL of 90 days, but you can use any TTL (or none) if you - specify a bucket. - gce_cluster_config: - Optional. The shared Compute Engine config settings for all - instances in a cluster. - master_config: - Optional. 
The Compute Engine config settings for the master - instance in a cluster. - worker_config: - Optional. The Compute Engine config settings for worker - instances in a cluster. - secondary_worker_config: - Optional. The Compute Engine config settings for additional - worker instances in a cluster. - software_config: - Optional. The config settings for software inside the cluster. - initialization_actions: - Optional. Commands to execute on each node after config is - completed. By default, executables are run on master and all - worker nodes. You can test a node’s ``role`` metadata to run - an executable on a master or worker node, as shown below using - ``curl`` (you can also use ``wget``): :: ROLE=$(curl -H - Metadata-Flavor:Google http://metadata/computeMetadata/v1/i - nstance/attributes/dataproc-role) if [[ "${ROLE}" == - 'Master' ]]; then ... master specific actions ... else - ... worker specific actions ... fi - encryption_config: - Optional. Encryption settings for the cluster. - autoscaling_config: - Optional. Autoscaling config for the policy associated with - the cluster. Cluster does not autoscale if this field is - unset. - security_config: - Optional. Security settings for the cluster. - lifecycle_config: - Optional. Lifecycle setting for the cluster. - endpoint_config: - Optional. Port/endpoint configuration for this cluster - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ClusterConfig) - }, -) -_sym_db.RegisterMessage(ClusterConfig) - -EndpointConfig = _reflection.GeneratedProtocolMessageType( - "EndpointConfig", - (_message.Message,), - { - "HttpPortsEntry": _reflection.GeneratedProtocolMessageType( - "HttpPortsEntry", - (_message.Message,), - { - "DESCRIPTOR": _ENDPOINTCONFIG_HTTPPORTSENTRY, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.EndpointConfig.HttpPortsEntry) - }, - ), - "DESCRIPTOR": _ENDPOINTCONFIG, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2", - "__doc__": """Endpoint config for this cluster - - Attributes: - http_ports: - Output only. The map of port descriptions to URLs. Will only - be populated if enable_http_port_access is true. - enable_http_port_access: - Optional. If true, enable http access to specific ports on the - cluster from external sources. Defaults to false. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.EndpointConfig) - }, -) -_sym_db.RegisterMessage(EndpointConfig) -_sym_db.RegisterMessage(EndpointConfig.HttpPortsEntry) - -AutoscalingConfig = _reflection.GeneratedProtocolMessageType( - "AutoscalingConfig", - (_message.Message,), - { - "DESCRIPTOR": _AUTOSCALINGCONFIG, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2", - "__doc__": """Autoscaling Policy config associated with the cluster. - - Attributes: - policy_uri: - Optional. The autoscaling policy used by the cluster. Only - resource names including projectid and location (region) are - valid. Examples: - ``https://www.googleapis.com/compute/v1/p - rojects/[project_id]/locations/[dataproc_region]/autoscalingPo - licies/[policy_id]`` - ``projects/[project_id]/locations/[dat - aproc_region]/autoscalingPolicies/[policy_id]`` Note that the - policy must be in the same project and Dataproc region. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.AutoscalingConfig) - }, -) -_sym_db.RegisterMessage(AutoscalingConfig) - -EncryptionConfig = _reflection.GeneratedProtocolMessageType( - "EncryptionConfig", - (_message.Message,), - { - "DESCRIPTOR": _ENCRYPTIONCONFIG, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2", - "__doc__": """Encryption settings for the cluster. - - Attributes: - gce_pd_kms_key_name: - Optional. The Cloud KMS key name to use for PD disk encryption - for all instances in the cluster. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.EncryptionConfig) - }, -) -_sym_db.RegisterMessage(EncryptionConfig) - -GceClusterConfig = _reflection.GeneratedProtocolMessageType( - "GceClusterConfig", - (_message.Message,), - { - "MetadataEntry": _reflection.GeneratedProtocolMessageType( - "MetadataEntry", - (_message.Message,), - { - "DESCRIPTOR": _GCECLUSTERCONFIG_METADATAENTRY, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.GceClusterConfig.MetadataEntry) - }, - ), - "DESCRIPTOR": _GCECLUSTERCONFIG, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2", - "__doc__": """Common config settings for resources of Compute Engine cluster - instances, applicable to all instances in the cluster. - - Attributes: - zone_uri: - Optional. The zone where the Compute Engine cluster will be - located. On a create request, it is required in the “global” - region. If omitted in a non-global Dataproc region, the - service will pick a zone in the corresponding Compute Engine - region. On a get request, zone will always be present. A full - URL, partial URI, or short name are valid. Examples: - ``htt - ps://www.googleapis.com/compute/v1/projects/[project_id]/zones - /[zone]`` - ``projects/[project_id]/zones/[zone]`` - ``us- - central1-f`` - network_uri: - Optional. The Compute Engine network to be used for machine - communications. Cannot be specified with subnetwork_uri. If - neither ``network_uri`` nor ``subnetwork_uri`` is specified, - the “default” network of the project is used, if it exists. - Cannot be a “Custom Subnet Network” (see `Using Subnetworks - `__ for - more information). A full URL, partial URI, or short name are - valid. Examples: - ``https://www.googleapis.com/compute/v1/p - rojects/[project_id]/regions/global/default`` - - ``projects/[project_id]/regions/global/default`` - - ``default`` - subnetwork_uri: - Optional. The Compute Engine subnetwork to be used for machine - communications. Cannot be specified with network_uri. A full - URL, partial URI, or short name are valid. Examples: - ``htt - ps://www.googleapis.com/compute/v1/projects/[project_id]/regio - ns/us-east1/subnetworks/sub0`` - - ``projects/[project_id]/regions/us-east1/subnetworks/sub0`` - - ``sub0`` - internal_ip_only: - Optional. If true, all instances in the cluster will only have - internal IP addresses. By default, clusters are not restricted - to internal IP addresses, and will have ephemeral external IP - addresses assigned to each instance. This ``internal_ip_only`` - restriction can only be enabled for subnetwork enabled - networks, and all off-cluster dependencies must be configured - to be accessible without external IP addresses. - service_account: - Optional. The `Dataproc service account - `__ - (also see `VM Data Plane identity - `__) used by - Dataproc cluster VM instances to access Google Cloud Platform - services. 
If not specified, the `Compute Engine default - service account - `__ is used. - service_account_scopes: - Optional. The URIs of service account scopes to be included in - Compute Engine instances. The following base set of scopes is - always included: - - https://www.googleapis.com/auth/cloud.useraccounts.readonly - - https://www.googleapis.com/auth/devstorage.read_write - - https://www.googleapis.com/auth/logging.write If no scopes - are specified, the following defaults are also provided: - - https://www.googleapis.com/auth/bigquery - - https://www.googleapis.com/auth/bigtable.admin.table - - https://www.googleapis.com/auth/bigtable.data - - https://www.googleapis.com/auth/devstorage.full_control - tags: - The Compute Engine tags to add to all instances (see `Tagging - instances `__). - metadata: - The Compute Engine metadata entries to add to all instances - (see `Project and instance metadata - `__). - reservation_affinity: - Optional. Reservation Affinity for consuming Zonal - reservation. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.GceClusterConfig) - }, -) -_sym_db.RegisterMessage(GceClusterConfig) -_sym_db.RegisterMessage(GceClusterConfig.MetadataEntry) - -InstanceGroupConfig = _reflection.GeneratedProtocolMessageType( - "InstanceGroupConfig", - (_message.Message,), - { - "DESCRIPTOR": _INSTANCEGROUPCONFIG, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2", - "__doc__": """The config settings for Compute Engine resources in an instance group, - such as a master or worker group. - - Attributes: - num_instances: - Optional. The number of VM instances in the instance group. - For master instance groups, must be set to 1. - instance_names: - Output only. The list of instance names. Dataproc derives the - names from ``cluster_name``, ``num_instances``, and the - instance group. - image_uri: - Optional. The Compute Engine image resource used for cluster - instances. The URI can represent an image or image family. - Image examples: - ``https://www.googleapis.com/compute/beta/ - projects/[project_id]/global/images/[image-id]`` - - ``projects/[project_id]/global/images/[image-id]`` - ``image- - id`` Image family examples. Dataproc will use the most recent - image from the family: - ``https://www.googleapis.com/comput - e/beta/projects/[project_id]/global/images/family/[custom- - image-family-name]`` - - ``projects/[project_id]/global/images/family/[custom-image- - family-name]`` If the URI is unspecified, it will be inferred - from ``SoftwareConfig.image_version`` or the system default. - machine_type_uri: - Optional. The Compute Engine machine type used for cluster - instances. A full URL, partial URI, or short name are valid. - Examples: - ``https://www.googleapis.com/compute/v1/projects - /[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`` - - ``projects/[project_id]/zones/us- - east1-a/machineTypes/n1-standard-2`` - ``n1-standard-2`` - **Auto Zone Exception**: If you are using the Dataproc `Auto - Zone Placement - `__ feature, you - must use the short name of the machine type resource, for - example, ``n1-standard-2``. - disk_config: - Optional. Disk option config settings. - is_preemptible: - Output only. Specifies that this instance group contains - preemptible instances. - preemptibility: - Optional. Specifies the preemptibility of the instance group. - The default value for master and worker groups is - ``NON_PREEMPTIBLE``. This default cannot be changed. The - default value for secondary instances is ``PREEMPTIBLE``. 
- managed_group_config: - Output only. The config for Compute Engine Instance Group - Manager that manages this group. This is only used for - preemptible instance groups. - accelerators: - Optional. The Compute Engine accelerator configuration for - these instances. - min_cpu_platform: - Optional. Specifies the minimum cpu platform for the Instance - Group. See `Dataproc -> Minimum CPU Platform `__. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.InstanceGroupConfig) - }, -) -_sym_db.RegisterMessage(InstanceGroupConfig) - -ManagedGroupConfig = _reflection.GeneratedProtocolMessageType( - "ManagedGroupConfig", - (_message.Message,), - { - "DESCRIPTOR": _MANAGEDGROUPCONFIG, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2", - "__doc__": """Specifies the resources used to actively manage an instance group. - - Attributes: - instance_template_name: - Output only. The name of the Instance Template used for the - Managed Instance Group. - instance_group_manager_name: - Output only. The name of the Instance Group Manager for this - group. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ManagedGroupConfig) - }, -) -_sym_db.RegisterMessage(ManagedGroupConfig) - -AcceleratorConfig = _reflection.GeneratedProtocolMessageType( - "AcceleratorConfig", - (_message.Message,), - { - "DESCRIPTOR": _ACCELERATORCONFIG, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2", - "__doc__": """Specifies the type and number of accelerator cards attached to the - instances of an instance. See `GPUs on Compute Engine - `__. - - Attributes: - accelerator_type_uri: - Full URL, partial URI, or short name of the accelerator type - resource to expose to this instance. See `Compute Engine - AcceleratorTypes `__. Examples: - ``https://www.go - ogleapis.com/compute/beta/projects/[project_id]/zones/us- - east1-a/acceleratorTypes/nvidia-tesla-k80`` - - ``projects/[project_id]/zones/us- - east1-a/acceleratorTypes/nvidia-tesla-k80`` - ``nvidia- - tesla-k80`` **Auto Zone Exception**: If you are using the - Dataproc `Auto Zone Placement - `__ feature, you - must use the short name of the accelerator type resource, for - example, ``nvidia-tesla-k80``. - accelerator_count: - The number of the accelerator cards of this type exposed to - this instance. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.AcceleratorConfig) - }, -) -_sym_db.RegisterMessage(AcceleratorConfig) - -DiskConfig = _reflection.GeneratedProtocolMessageType( - "DiskConfig", - (_message.Message,), - { - "DESCRIPTOR": _DISKCONFIG, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2", - "__doc__": """Specifies the config of disk options for a group of VM instances. - - Attributes: - boot_disk_type: - Optional. Type of the boot disk (default is “pd-standard”). - Valid values: “pd-ssd” (Persistent Disk Solid State Drive) or - “pd-standard” (Persistent Disk Hard Disk Drive). - boot_disk_size_gb: - Optional. Size in GB of the boot disk (default is 500GB). - num_local_ssds: - Optional. Number of attached SSDs, from 0 to 4 (default is 0). - If SSDs are not attached, the boot disk is used to store - runtime logs and `HDFS `__ data. If one or more SSDs are - attached, this runtime bulk data is spread across them, and - the boot disk contains only basic config and installed - binaries. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.DiskConfig) - }, -) -_sym_db.RegisterMessage(DiskConfig) - -NodeInitializationAction = _reflection.GeneratedProtocolMessageType( - "NodeInitializationAction", - (_message.Message,), - { - "DESCRIPTOR": _NODEINITIALIZATIONACTION, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2", - "__doc__": """Specifies an executable to run on a fully configured node and a - timeout period for executable completion. - - Attributes: - executable_file: - Required. Cloud Storage URI of executable file. - execution_timeout: - Optional. Amount of time executable has to complete. Default - is 10 minutes (see JSON representation of `Duration - `__). Cluster creation fails with an - explanatory error message (the name of the executable that - caused the error and the exceeded timeout period) if the - executable is not completed at end of the timeout period. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.NodeInitializationAction) - }, -) -_sym_db.RegisterMessage(NodeInitializationAction) - -ClusterStatus = _reflection.GeneratedProtocolMessageType( - "ClusterStatus", - (_message.Message,), - { - "DESCRIPTOR": _CLUSTERSTATUS, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2", - "__doc__": """The status of a cluster and its instances. - - Attributes: - state: - Output only. The cluster’s state. - detail: - Optional. Output only. Details of cluster’s state. - state_start_time: - Output only. Time when this state was entered (see JSON - representation of `Timestamp - `__). - substate: - Output only. Additional state information that includes status - reported by the agent. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ClusterStatus) - }, -) -_sym_db.RegisterMessage(ClusterStatus) - -SecurityConfig = _reflection.GeneratedProtocolMessageType( - "SecurityConfig", - (_message.Message,), - { - "DESCRIPTOR": _SECURITYCONFIG, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2", - "__doc__": """Security related configuration, including Kerberos. - - Attributes: - kerberos_config: - Kerberos related configuration. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.SecurityConfig) - }, -) -_sym_db.RegisterMessage(SecurityConfig) - -KerberosConfig = _reflection.GeneratedProtocolMessageType( - "KerberosConfig", - (_message.Message,), - { - "DESCRIPTOR": _KERBEROSCONFIG, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2", - "__doc__": """Specifies Kerberos related configuration. - - Attributes: - enable_kerberos: - Optional. Flag to indicate whether to Kerberize the cluster - (default: false). Set this field to true to enable Kerberos on - a cluster. - root_principal_password_uri: - Required. The Cloud Storage URI of a KMS encrypted file - containing the root principal password. - kms_key_uri: - Required. The uri of the KMS key used to encrypt various - sensitive files. - keystore_uri: - Optional. The Cloud Storage URI of the keystore file used for - SSL encryption. If not provided, Dataproc will provide a self- - signed certificate. - truststore_uri: - Optional. The Cloud Storage URI of the truststore file used - for SSL encryption. If not provided, Dataproc will provide a - self-signed certificate. - keystore_password_uri: - Optional. The Cloud Storage URI of a KMS encrypted file - containing the password to the user provided keystore. For the - self-signed certificate, this password is generated by - Dataproc. 
- key_password_uri: - Optional. The Cloud Storage URI of a KMS encrypted file - containing the password to the user provided key. For the - self-signed certificate, this password is generated by - Dataproc. - truststore_password_uri: - Optional. The Cloud Storage URI of a KMS encrypted file - containing the password to the user provided truststore. For - the self-signed certificate, this password is generated by - Dataproc. - cross_realm_trust_realm: - Optional. The remote realm the Dataproc on-cluster KDC will - trust, should the user enable cross realm trust. - cross_realm_trust_kdc: - Optional. The KDC (IP or hostname) for the remote trusted - realm in a cross realm trust relationship. - cross_realm_trust_admin_server: - Optional. The admin server (IP or hostname) for the remote - trusted realm in a cross realm trust relationship. - cross_realm_trust_shared_password_uri: - Optional. The Cloud Storage URI of a KMS encrypted file - containing the shared password between the on-cluster Kerberos - realm and the remote trusted realm, in a cross realm trust - relationship. - kdc_db_key_uri: - Optional. The Cloud Storage URI of a KMS encrypted file - containing the master key of the KDC database. - tgt_lifetime_hours: - Optional. The lifetime of the ticket granting ticket, in - hours. If not specified, or user specifies 0, then default - value 10 will be used. - realm: - Optional. The name of the on-cluster Kerberos realm. If not - specified, the uppercased domain of hostnames will be the - realm. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.KerberosConfig) - }, -) -_sym_db.RegisterMessage(KerberosConfig) - -SoftwareConfig = _reflection.GeneratedProtocolMessageType( - "SoftwareConfig", - (_message.Message,), - { - "PropertiesEntry": _reflection.GeneratedProtocolMessageType( - "PropertiesEntry", - (_message.Message,), - { - "DESCRIPTOR": _SOFTWARECONFIG_PROPERTIESENTRY, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.SoftwareConfig.PropertiesEntry) - }, - ), - "DESCRIPTOR": _SOFTWARECONFIG, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2", - "__doc__": """Specifies the selection and config of software inside the cluster. - - Attributes: - image_version: - Optional. The version of software inside the cluster. It must - be one of the supported `Dataproc Versions `__, such as “1.2” - (including a subminor version, such as “1.2.29”), or the - `“preview” version `__. If - unspecified, it defaults to the latest Debian version. - properties: - Optional. The properties to set on daemon config files. - Property keys are specified in ``prefix:property`` format, for - example ``core:hadoop.tmp.dir``. The following are supported - prefixes and their mappings: - capacity-scheduler: - ``capacity-scheduler.xml`` - core: ``core-site.xml`` - - distcp: ``distcp-default.xml`` - hdfs: ``hdfs-site.xml`` - - hive: ``hive-site.xml`` - mapred: ``mapred-site.xml`` - pig: - ``pig.properties`` - spark: ``spark-defaults.conf`` - yarn: - ``yarn-site.xml`` For more information, see `Cluster - properties - `__. - optional_components: - Optional. The set of components to activate on the cluster. 
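As a concrete illustration of the ``prefix:property`` format above, a ``SoftwareConfig`` could be built with the new ``dataproc_v1`` types roughly as follows; the image version, the property values and the chosen optional component are placeholder choices: ::

    from google.cloud import dataproc_v1

    software_config = dataproc_v1.SoftwareConfig(
        image_version="1.5-debian10",  # placeholder Dataproc image version
        properties={
            # prefix:property keys, e.g. the core-site.xml and
            # spark-defaults.conf mappings listed above (values illustrative)
            "core:hadoop.tmp.dir": "/tmp/hadoop",
            "spark:spark.executor.memory": "4g",
        },
        optional_components=[dataproc_v1.Component.JUPYTER],
    )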
- """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.SoftwareConfig) - }, -) -_sym_db.RegisterMessage(SoftwareConfig) -_sym_db.RegisterMessage(SoftwareConfig.PropertiesEntry) - -LifecycleConfig = _reflection.GeneratedProtocolMessageType( - "LifecycleConfig", - (_message.Message,), - { - "DESCRIPTOR": _LIFECYCLECONFIG, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2", - "__doc__": """Specifies the cluster auto-delete schedule configuration. - - Attributes: - idle_delete_ttl: - Optional. The duration to keep the cluster alive while idling - (when no jobs are running). Passing this threshold will cause - the cluster to be deleted. Minimum value is 10 minutes; - maximum value is 14 days (see JSON representation of `Duration - `__. - ttl: - Either the exact time the cluster should be deleted at or the - cluster maximum age. - auto_delete_time: - Optional. The time when cluster will be auto-deleted (see JSON - representation of `Timestamp - `__). - auto_delete_ttl: - Optional. The lifetime duration of cluster. The cluster will - be auto-deleted at the end of this period. Minimum value is 10 - minutes; maximum value is 14 days (see JSON representation of - `Duration `__). - idle_start_time: - Output only. The time when cluster became idle (most recent - job finished) and became eligible for deletion due to idleness - (see JSON representation of `Timestamp - `__). - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.LifecycleConfig) - }, -) -_sym_db.RegisterMessage(LifecycleConfig) - -ClusterMetrics = _reflection.GeneratedProtocolMessageType( - "ClusterMetrics", - (_message.Message,), - { - "HdfsMetricsEntry": _reflection.GeneratedProtocolMessageType( - "HdfsMetricsEntry", - (_message.Message,), - { - "DESCRIPTOR": _CLUSTERMETRICS_HDFSMETRICSENTRY, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ClusterMetrics.HdfsMetricsEntry) - }, - ), - "YarnMetricsEntry": _reflection.GeneratedProtocolMessageType( - "YarnMetricsEntry", - (_message.Message,), - { - "DESCRIPTOR": _CLUSTERMETRICS_YARNMETRICSENTRY, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ClusterMetrics.YarnMetricsEntry) - }, - ), - "DESCRIPTOR": _CLUSTERMETRICS, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2", - "__doc__": """Contains cluster daemon metrics, such as HDFS and YARN stats. **Beta - Feature**: This report is available for testing purposes only. It may - be changed before final release. - - Attributes: - hdfs_metrics: - The HDFS metrics. - yarn_metrics: - The YARN metrics. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ClusterMetrics) - }, -) -_sym_db.RegisterMessage(ClusterMetrics) -_sym_db.RegisterMessage(ClusterMetrics.HdfsMetricsEntry) -_sym_db.RegisterMessage(ClusterMetrics.YarnMetricsEntry) - -CreateClusterRequest = _reflection.GeneratedProtocolMessageType( - "CreateClusterRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATECLUSTERREQUEST, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2", - "__doc__": """A request to create a cluster. - - Attributes: - project_id: - Required. The ID of the Google Cloud Platform project that the - cluster belongs to. - region: - Required. The Dataproc region in which to handle the request. - cluster: - Required. The cluster to create. - request_id: - Optional. A unique id used to identify the request. 
If the server receives two
- [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest]
- requests with the same id, then the second request will be ignored and the
- first [google.longrunning.Operation][google.longrunning.Operation] created
- and stored in the backend is returned. It is recommended to always set this
- value to a `UUID `__. The id must contain only letters (a-z, A-Z), numbers
- (0-9), underscores (_), and hyphens (-). The maximum length is 40
- characters.
- """,
- # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.CreateClusterRequest)
- },
-)
-_sym_db.RegisterMessage(CreateClusterRequest)
-
-UpdateClusterRequest = _reflection.GeneratedProtocolMessageType(
- "UpdateClusterRequest",
- (_message.Message,),
- {
- "DESCRIPTOR": _UPDATECLUSTERREQUEST,
- "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2",
- "__doc__": """A request to update a cluster.
-
- Attributes:
- project_id:
- Required. The ID of the Google Cloud Platform project the cluster
- belongs to.
- region:
- Required. The Dataproc region in which to handle the request.
- cluster_name:
- Required. The cluster name.
- cluster:
- Required. The changes to the cluster.
- graceful_decommission_timeout:
- Optional. Timeout for graceful YARN decommissioning. Graceful
- decommissioning allows removing nodes from the cluster without
- interrupting jobs in progress. Timeout specifies how long to wait for
- jobs in progress to finish before forcefully removing nodes (and
- potentially interrupting jobs). Default timeout is 0 (for forceful
- decommission), and the maximum allowed timeout is 1 day (see JSON
- representation of `Duration `__). Only supported on Dataproc image
- versions 1.2 and higher.
- update_mask:
- Required. Specifies the path, relative to ``Cluster``, of the field to
- update. For example, to change the number of workers in a cluster to
- 5, the ``update_mask`` parameter would be specified as
- ``config.worker_config.num_instances``, and the ``PATCH`` request body
- would specify the new value, as follows: ::
- { "config": { "workerConfig": { "numInstances": "5" } } }
- Similarly, to change the number of preemptible workers in a cluster to
- 5, the ``update_mask`` parameter would be
- ``config.secondary_worker_config.num_instances``, and the ``PATCH``
- request body would be set as follows: ::
- { "config": { "secondaryWorkerConfig": { "numInstances": "5" } } }
- Note: Currently, only the following fields can be updated (each mask
- listed with its purpose; a usage sketch with the new generated client
- follows this list):
- ``labels``: Update labels
- ``config.worker_config.num_instances``: Resize primary worker group
- ``config.secondary_worker_config.num_instances``: Resize secondary worker group
- ``config.autoscaling_config.policy_uri``: Use, stop using, or change autoscaling policies
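As referenced above, resizing the primary worker group through ``update_mask`` might look roughly like this with the new generated client; project, region and cluster name are placeholders, and the flattened keyword arguments (project_id, region, cluster_name, cluster, update_mask) follow the method signature recorded in the service descriptor below: ::

    from google.cloud import dataproc_v1
    from google.protobuf import field_mask_pb2

    client = dataproc_v1.ClusterControllerClient(
        client_options={"api_endpoint": "us-central1-dataproc.googleapis.com:443"}
    )
    operation = client.update_cluster(
        project_id="my-project",    # placeholder
        region="us-central1",       # placeholder
        cluster_name="my-cluster",  # placeholder
        # Only the masked path is read from this partial Cluster message.
        cluster=dataproc_v1.Cluster(
            config=dataproc_v1.ClusterConfig(
                worker_config=dataproc_v1.InstanceGroupConfig(num_instances=5)
            )
        ),
        update_mask=field_mask_pb2.FieldMask(
            paths=["config.worker_config.num_instances"]
        ),
    )
    operation.result()  # wait for the resize to finish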
- request_id: - Optional. A unique id used to identify the request. If the - server receives two [UpdateClusterRequest][google.cloud.datapr - oc.v1.UpdateClusterRequest] requests with the same id, then - the second request will be ignored and the first - [google.longrunning.Operation][google.longrunning.Operation] - created and stored in the backend is returned. It is - recommended to always set this value to a `UUID `__. The id - must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). The maximum length is 40 - characters. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.UpdateClusterRequest) - }, -) -_sym_db.RegisterMessage(UpdateClusterRequest) - -DeleteClusterRequest = _reflection.GeneratedProtocolMessageType( - "DeleteClusterRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETECLUSTERREQUEST, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2", - "__doc__": """A request to delete a cluster. - - Attributes: - project_id: - Required. The ID of the Google Cloud Platform project that the - cluster belongs to. - region: - Required. The Dataproc region in which to handle the request. - cluster_name: - Required. The cluster name. - cluster_uuid: - Optional. Specifying the ``cluster_uuid`` means the RPC should - fail (with error NOT_FOUND) if cluster with specified UUID - does not exist. - request_id: - Optional. A unique id used to identify the request. If the - server receives two [DeleteClusterRequest][google.cloud.datapr - oc.v1.DeleteClusterRequest] requests with the same id, then - the second request will be ignored and the first - [google.longrunning.Operation][google.longrunning.Operation] - created and stored in the backend is returned. It is - recommended to always set this value to a `UUID `__. The id - must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). The maximum length is 40 - characters. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.DeleteClusterRequest) - }, -) -_sym_db.RegisterMessage(DeleteClusterRequest) - -GetClusterRequest = _reflection.GeneratedProtocolMessageType( - "GetClusterRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETCLUSTERREQUEST, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2", - "__doc__": """Request to get the resource representation for a cluster in a project. - - Attributes: - project_id: - Required. The ID of the Google Cloud Platform project that the - cluster belongs to. - region: - Required. The Dataproc region in which to handle the request. - cluster_name: - Required. The cluster name. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.GetClusterRequest) - }, -) -_sym_db.RegisterMessage(GetClusterRequest) - -ListClustersRequest = _reflection.GeneratedProtocolMessageType( - "ListClustersRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTCLUSTERSREQUEST, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2", - "__doc__": """A request to list the clusters in a project. - - Attributes: - project_id: - Required. The ID of the Google Cloud Platform project that the - cluster belongs to. - region: - Required. The Dataproc region in which to handle the request. - filter: - Optional. A filter constraining the clusters to list. Filters - are case-sensitive and have the following syntax: field = - value [AND [field = value]] … where **field** is one of - ``status.state``, ``clusterName``, or ``labels.[KEY]``, and - ``[KEY]`` is a label key. 
**value** can be ``*`` to match all - values. ``status.state`` can be one of the following: - ``ACTIVE``, ``INACTIVE``, ``CREATING``, ``RUNNING``, - ``ERROR``, ``DELETING``, or ``UPDATING``. ``ACTIVE`` contains - the ``CREATING``, ``UPDATING``, and ``RUNNING`` states. - ``INACTIVE`` contains the ``DELETING`` and ``ERROR`` states. - ``clusterName`` is the name of the cluster provided at - creation time. Only the logical ``AND`` operator is supported; - space-separated items are treated as having an implicit - ``AND`` operator. Example filter: status.state = ACTIVE AND - clusterName = mycluster AND labels.env = staging AND - labels.starred = \* - page_size: - Optional. The standard List page size. - page_token: - Optional. The standard List page token. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ListClustersRequest) - }, -) -_sym_db.RegisterMessage(ListClustersRequest) - -ListClustersResponse = _reflection.GeneratedProtocolMessageType( - "ListClustersResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTCLUSTERSRESPONSE, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2", - "__doc__": """The list of all clusters in a project. - - Attributes: - clusters: - Output only. The clusters in the project. - next_page_token: - Output only. This token is included in the response if there - are more results to fetch. To fetch additional results, - provide this value as the ``page_token`` in a subsequent - ``ListClustersRequest``. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ListClustersResponse) - }, -) -_sym_db.RegisterMessage(ListClustersResponse) - -DiagnoseClusterRequest = _reflection.GeneratedProtocolMessageType( - "DiagnoseClusterRequest", - (_message.Message,), - { - "DESCRIPTOR": _DIAGNOSECLUSTERREQUEST, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2", - "__doc__": """A request to collect cluster diagnostic information. - - Attributes: - project_id: - Required. The ID of the Google Cloud Platform project that the - cluster belongs to. - region: - Required. The Dataproc region in which to handle the request. - cluster_name: - Required. The cluster name. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.DiagnoseClusterRequest) - }, -) -_sym_db.RegisterMessage(DiagnoseClusterRequest) - -DiagnoseClusterResults = _reflection.GeneratedProtocolMessageType( - "DiagnoseClusterResults", - (_message.Message,), - { - "DESCRIPTOR": _DIAGNOSECLUSTERRESULTS, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2", - "__doc__": """The location of diagnostic output. - - Attributes: - output_uri: - Output only. The Cloud Storage URI of the diagnostic output. - The output report is a plain text file with a summary of - collected diagnostics. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.DiagnoseClusterResults) - }, -) -_sym_db.RegisterMessage(DiagnoseClusterResults) - -ReservationAffinity = _reflection.GeneratedProtocolMessageType( - "ReservationAffinity", - (_message.Message,), - { - "DESCRIPTOR": _RESERVATIONAFFINITY, - "__module__": "google.cloud.dataproc_v1.proto.clusters_pb2", - "__doc__": """Reservation Affinity for consuming Zonal reservation. - - Attributes: - consume_reservation_type: - Optional. Type of reservation to consume - key: - Optional. Corresponds to the label key of reservation - resource. - values: - Optional. Corresponds to the label values of reservation - resource. 
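For the ``ListClustersRequest`` filter syntax documented a few messages above, a rough sketch with the new generated client; project, region and label values are placeholders, and the flattened ``filter`` keyword is assumed from the method signature recorded in the service descriptor below: ::

    from google.cloud import dataproc_v1

    client = dataproc_v1.ClusterControllerClient(
        client_options={"api_endpoint": "us-central1-dataproc.googleapis.com:443"}
    )
    # Iterate over the pager; only ACTIVE clusters carrying a matching label.
    for cluster in client.list_clusters(
        project_id="my-project",
        region="us-central1",
        filter="status.state = ACTIVE AND labels.env = staging",
    ):
        print(cluster.cluster_name, cluster.status.state)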
- """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ReservationAffinity) - }, -) -_sym_db.RegisterMessage(ReservationAffinity) - - -DESCRIPTOR._options = None -_CLUSTER_LABELSENTRY._options = None -_CLUSTER.fields_by_name["project_id"]._options = None -_CLUSTER.fields_by_name["cluster_name"]._options = None -_CLUSTER.fields_by_name["config"]._options = None -_CLUSTER.fields_by_name["labels"]._options = None -_CLUSTER.fields_by_name["status"]._options = None -_CLUSTER.fields_by_name["status_history"]._options = None -_CLUSTER.fields_by_name["cluster_uuid"]._options = None -_CLUSTER.fields_by_name["metrics"]._options = None -_CLUSTERCONFIG.fields_by_name["config_bucket"]._options = None -_CLUSTERCONFIG.fields_by_name["temp_bucket"]._options = None -_CLUSTERCONFIG.fields_by_name["gce_cluster_config"]._options = None -_CLUSTERCONFIG.fields_by_name["master_config"]._options = None -_CLUSTERCONFIG.fields_by_name["worker_config"]._options = None -_CLUSTERCONFIG.fields_by_name["secondary_worker_config"]._options = None -_CLUSTERCONFIG.fields_by_name["software_config"]._options = None -_CLUSTERCONFIG.fields_by_name["initialization_actions"]._options = None -_CLUSTERCONFIG.fields_by_name["encryption_config"]._options = None -_CLUSTERCONFIG.fields_by_name["autoscaling_config"]._options = None -_CLUSTERCONFIG.fields_by_name["security_config"]._options = None -_CLUSTERCONFIG.fields_by_name["lifecycle_config"]._options = None -_CLUSTERCONFIG.fields_by_name["endpoint_config"]._options = None -_ENDPOINTCONFIG_HTTPPORTSENTRY._options = None -_ENDPOINTCONFIG.fields_by_name["http_ports"]._options = None -_ENDPOINTCONFIG.fields_by_name["enable_http_port_access"]._options = None -_AUTOSCALINGCONFIG.fields_by_name["policy_uri"]._options = None -_ENCRYPTIONCONFIG.fields_by_name["gce_pd_kms_key_name"]._options = None -_GCECLUSTERCONFIG_METADATAENTRY._options = None -_GCECLUSTERCONFIG.fields_by_name["zone_uri"]._options = None -_GCECLUSTERCONFIG.fields_by_name["network_uri"]._options = None -_GCECLUSTERCONFIG.fields_by_name["subnetwork_uri"]._options = None -_GCECLUSTERCONFIG.fields_by_name["internal_ip_only"]._options = None -_GCECLUSTERCONFIG.fields_by_name["service_account"]._options = None -_GCECLUSTERCONFIG.fields_by_name["service_account_scopes"]._options = None -_GCECLUSTERCONFIG.fields_by_name["reservation_affinity"]._options = None -_INSTANCEGROUPCONFIG.fields_by_name["num_instances"]._options = None -_INSTANCEGROUPCONFIG.fields_by_name["instance_names"]._options = None -_INSTANCEGROUPCONFIG.fields_by_name["image_uri"]._options = None -_INSTANCEGROUPCONFIG.fields_by_name["machine_type_uri"]._options = None -_INSTANCEGROUPCONFIG.fields_by_name["disk_config"]._options = None -_INSTANCEGROUPCONFIG.fields_by_name["is_preemptible"]._options = None -_INSTANCEGROUPCONFIG.fields_by_name["preemptibility"]._options = None -_INSTANCEGROUPCONFIG.fields_by_name["managed_group_config"]._options = None -_INSTANCEGROUPCONFIG.fields_by_name["accelerators"]._options = None -_INSTANCEGROUPCONFIG.fields_by_name["min_cpu_platform"]._options = None -_MANAGEDGROUPCONFIG.fields_by_name["instance_template_name"]._options = None -_MANAGEDGROUPCONFIG.fields_by_name["instance_group_manager_name"]._options = None -_DISKCONFIG.fields_by_name["boot_disk_type"]._options = None -_DISKCONFIG.fields_by_name["boot_disk_size_gb"]._options = None -_DISKCONFIG.fields_by_name["num_local_ssds"]._options = None -_NODEINITIALIZATIONACTION.fields_by_name["executable_file"]._options = None 
-_NODEINITIALIZATIONACTION.fields_by_name["execution_timeout"]._options = None -_CLUSTERSTATUS.fields_by_name["state"]._options = None -_CLUSTERSTATUS.fields_by_name["detail"]._options = None -_CLUSTERSTATUS.fields_by_name["state_start_time"]._options = None -_CLUSTERSTATUS.fields_by_name["substate"]._options = None -_KERBEROSCONFIG.fields_by_name["enable_kerberos"]._options = None -_KERBEROSCONFIG.fields_by_name["root_principal_password_uri"]._options = None -_KERBEROSCONFIG.fields_by_name["kms_key_uri"]._options = None -_KERBEROSCONFIG.fields_by_name["keystore_uri"]._options = None -_KERBEROSCONFIG.fields_by_name["truststore_uri"]._options = None -_KERBEROSCONFIG.fields_by_name["keystore_password_uri"]._options = None -_KERBEROSCONFIG.fields_by_name["key_password_uri"]._options = None -_KERBEROSCONFIG.fields_by_name["truststore_password_uri"]._options = None -_KERBEROSCONFIG.fields_by_name["cross_realm_trust_realm"]._options = None -_KERBEROSCONFIG.fields_by_name["cross_realm_trust_kdc"]._options = None -_KERBEROSCONFIG.fields_by_name["cross_realm_trust_admin_server"]._options = None -_KERBEROSCONFIG.fields_by_name["cross_realm_trust_shared_password_uri"]._options = None -_KERBEROSCONFIG.fields_by_name["kdc_db_key_uri"]._options = None -_KERBEROSCONFIG.fields_by_name["tgt_lifetime_hours"]._options = None -_KERBEROSCONFIG.fields_by_name["realm"]._options = None -_SOFTWARECONFIG_PROPERTIESENTRY._options = None -_SOFTWARECONFIG.fields_by_name["image_version"]._options = None -_SOFTWARECONFIG.fields_by_name["properties"]._options = None -_SOFTWARECONFIG.fields_by_name["optional_components"]._options = None -_LIFECYCLECONFIG.fields_by_name["idle_delete_ttl"]._options = None -_LIFECYCLECONFIG.fields_by_name["auto_delete_time"]._options = None -_LIFECYCLECONFIG.fields_by_name["auto_delete_ttl"]._options = None -_LIFECYCLECONFIG.fields_by_name["idle_start_time"]._options = None -_CLUSTERMETRICS_HDFSMETRICSENTRY._options = None -_CLUSTERMETRICS_YARNMETRICSENTRY._options = None -_CREATECLUSTERREQUEST.fields_by_name["project_id"]._options = None -_CREATECLUSTERREQUEST.fields_by_name["region"]._options = None -_CREATECLUSTERREQUEST.fields_by_name["cluster"]._options = None -_CREATECLUSTERREQUEST.fields_by_name["request_id"]._options = None -_UPDATECLUSTERREQUEST.fields_by_name["project_id"]._options = None -_UPDATECLUSTERREQUEST.fields_by_name["region"]._options = None -_UPDATECLUSTERREQUEST.fields_by_name["cluster_name"]._options = None -_UPDATECLUSTERREQUEST.fields_by_name["cluster"]._options = None -_UPDATECLUSTERREQUEST.fields_by_name["graceful_decommission_timeout"]._options = None -_UPDATECLUSTERREQUEST.fields_by_name["update_mask"]._options = None -_UPDATECLUSTERREQUEST.fields_by_name["request_id"]._options = None -_DELETECLUSTERREQUEST.fields_by_name["project_id"]._options = None -_DELETECLUSTERREQUEST.fields_by_name["region"]._options = None -_DELETECLUSTERREQUEST.fields_by_name["cluster_name"]._options = None -_DELETECLUSTERREQUEST.fields_by_name["cluster_uuid"]._options = None -_DELETECLUSTERREQUEST.fields_by_name["request_id"]._options = None -_GETCLUSTERREQUEST.fields_by_name["project_id"]._options = None -_GETCLUSTERREQUEST.fields_by_name["region"]._options = None -_GETCLUSTERREQUEST.fields_by_name["cluster_name"]._options = None -_LISTCLUSTERSREQUEST.fields_by_name["project_id"]._options = None -_LISTCLUSTERSREQUEST.fields_by_name["region"]._options = None -_LISTCLUSTERSREQUEST.fields_by_name["filter"]._options = None -_LISTCLUSTERSREQUEST.fields_by_name["page_size"]._options = None 
-_LISTCLUSTERSREQUEST.fields_by_name["page_token"]._options = None -_LISTCLUSTERSRESPONSE.fields_by_name["clusters"]._options = None -_LISTCLUSTERSRESPONSE.fields_by_name["next_page_token"]._options = None -_DIAGNOSECLUSTERREQUEST.fields_by_name["project_id"]._options = None -_DIAGNOSECLUSTERREQUEST.fields_by_name["region"]._options = None -_DIAGNOSECLUSTERREQUEST.fields_by_name["cluster_name"]._options = None -_DIAGNOSECLUSTERRESULTS.fields_by_name["output_uri"]._options = None -_RESERVATIONAFFINITY.fields_by_name["consume_reservation_type"]._options = None -_RESERVATIONAFFINITY.fields_by_name["key"]._options = None -_RESERVATIONAFFINITY.fields_by_name["values"]._options = None - -_CLUSTERCONTROLLER = _descriptor.ServiceDescriptor( - name="ClusterController", - full_name="google.cloud.dataproc.v1.ClusterController", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\027dataproc.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", - create_key=_descriptor._internal_create_key, - serialized_start=6586, - serialized_end=8249, - methods=[ - _descriptor.MethodDescriptor( - name="CreateCluster", - full_name="google.cloud.dataproc.v1.ClusterController.CreateCluster", - index=0, - containing_service=None, - input_type=_CREATECLUSTERREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\002>"3/v1/projects/{project_id}/regions/{region}/clusters:\007cluster\332A\031project_id,region,cluster\312A<\n\007Cluster\0221google.cloud.dataproc.v1.ClusterOperationMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="UpdateCluster", - full_name="google.cloud.dataproc.v1.ClusterController.UpdateCluster", - index=1, - containing_service=None, - input_type=_UPDATECLUSTERREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b"\202\323\344\223\002M2B/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:\007cluster\332A2project_id,region,cluster_name,cluster,update_mask\312A<\n\007Cluster\0221google.cloud.dataproc.v1.ClusterOperationMetadata", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeleteCluster", - full_name="google.cloud.dataproc.v1.ClusterController.DeleteCluster", - index=2, - containing_service=None, - input_type=_DELETECLUSTERREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b"\202\323\344\223\002D*B/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}\332A\036project_id,region,cluster_name\312AJ\n\025google.protobuf.Empty\0221google.cloud.dataproc.v1.ClusterOperationMetadata", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetCluster", - full_name="google.cloud.dataproc.v1.ClusterController.GetCluster", - index=3, - containing_service=None, - input_type=_GETCLUSTERREQUEST, - output_type=_CLUSTER, - serialized_options=b"\202\323\344\223\002D\022B/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}\332A\036project_id,region,cluster_name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ListClusters", - full_name="google.cloud.dataproc.v1.ClusterController.ListClusters", - index=4, - containing_service=None, - input_type=_LISTCLUSTERSREQUEST, - output_type=_LISTCLUSTERSRESPONSE, - 
serialized_options=b"\202\323\344\223\0025\0223/v1/projects/{project_id}/regions/{region}/clusters\332A\021project_id,region\332A\030project_id,region,filter", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DiagnoseCluster", - full_name="google.cloud.dataproc.v1.ClusterController.DiagnoseCluster", - index=5, - containing_service=None, - input_type=_DIAGNOSECLUSTERREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\002P"K/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:diagnose:\001*\332A\036project_id,region,cluster_name\312AK\n\026DiagnoseClusterResults\0221google.cloud.dataproc.v1.ClusterOperationMetadata', - create_key=_descriptor._internal_create_key, - ), - ], -) -_sym_db.RegisterServiceDescriptor(_CLUSTERCONTROLLER) - -DESCRIPTOR.services_by_name["ClusterController"] = _CLUSTERCONTROLLER - -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/dataproc_v1/proto/clusters_pb2_grpc.py b/google/cloud/dataproc_v1/proto/clusters_pb2_grpc.py deleted file mode 100644 index 8eab4906..00000000 --- a/google/cloud/dataproc_v1/proto/clusters_pb2_grpc.py +++ /dev/null @@ -1,321 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -from google.cloud.dataproc_v1.proto import ( - clusters_pb2 as google_dot_cloud_dot_dataproc__v1_dot_proto_dot_clusters__pb2, -) -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) - - -class ClusterControllerStub(object): - """The ClusterControllerService provides methods to manage clusters - of Compute Engine instances. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.CreateCluster = channel.unary_unary( - "/google.cloud.dataproc.v1.ClusterController/CreateCluster", - request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_clusters__pb2.CreateClusterRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.UpdateCluster = channel.unary_unary( - "/google.cloud.dataproc.v1.ClusterController/UpdateCluster", - request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_clusters__pb2.UpdateClusterRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.DeleteCluster = channel.unary_unary( - "/google.cloud.dataproc.v1.ClusterController/DeleteCluster", - request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_clusters__pb2.DeleteClusterRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetCluster = channel.unary_unary( - "/google.cloud.dataproc.v1.ClusterController/GetCluster", - request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_clusters__pb2.GetClusterRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_clusters__pb2.Cluster.FromString, - ) - self.ListClusters = channel.unary_unary( - "/google.cloud.dataproc.v1.ClusterController/ListClusters", - request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_clusters__pb2.ListClustersRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_clusters__pb2.ListClustersResponse.FromString, - ) - self.DiagnoseCluster = channel.unary_unary( - "/google.cloud.dataproc.v1.ClusterController/DiagnoseCluster", - request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_clusters__pb2.DiagnoseClusterRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - - -class ClusterControllerServicer(object): - """The ClusterControllerService provides methods to manage clusters - of Compute Engine instances. - """ - - def CreateCluster(self, request, context): - """Creates a cluster in a project. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will be - [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateCluster(self, request, context): - """Updates a cluster in a project. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will be - [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteCluster(self, request, context): - """Deletes a cluster in a project. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will be - [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetCluster(self, request, context): - """Gets the resource representation for a cluster in a project. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListClusters(self, request, context): - """Lists all regions/{region}/clusters in a project alphabetically. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DiagnoseCluster(self, request, context): - """Gets cluster diagnostic information. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will be - [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata). - After the operation completes, - [Operation.response][google.longrunning.Operation.response] - contains - [DiagnoseClusterResults](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults). - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_ClusterControllerServicer_to_server(servicer, server): - rpc_method_handlers = { - "CreateCluster": grpc.unary_unary_rpc_method_handler( - servicer.CreateCluster, - request_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_clusters__pb2.CreateClusterRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "UpdateCluster": grpc.unary_unary_rpc_method_handler( - servicer.UpdateCluster, - request_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_clusters__pb2.UpdateClusterRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "DeleteCluster": grpc.unary_unary_rpc_method_handler( - servicer.DeleteCluster, - request_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_clusters__pb2.DeleteClusterRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetCluster": grpc.unary_unary_rpc_method_handler( - servicer.GetCluster, - request_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_clusters__pb2.GetClusterRequest.FromString, - response_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_clusters__pb2.Cluster.SerializeToString, - ), - "ListClusters": grpc.unary_unary_rpc_method_handler( - servicer.ListClusters, - request_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_clusters__pb2.ListClustersRequest.FromString, - response_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_clusters__pb2.ListClustersResponse.SerializeToString, - ), - "DiagnoseCluster": grpc.unary_unary_rpc_method_handler( - servicer.DiagnoseCluster, - request_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_clusters__pb2.DiagnoseClusterRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.cloud.dataproc.v1.ClusterController", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) - - -# This class is part 
of an EXPERIMENTAL API. -class ClusterController(object): - """The ClusterControllerService provides methods to manage clusters - of Compute Engine instances. - """ - - @staticmethod - def CreateCluster( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1.ClusterController/CreateCluster", - google_dot_cloud_dot_dataproc__v1_dot_proto_dot_clusters__pb2.CreateClusterRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def UpdateCluster( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1.ClusterController/UpdateCluster", - google_dot_cloud_dot_dataproc__v1_dot_proto_dot_clusters__pb2.UpdateClusterRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DeleteCluster( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1.ClusterController/DeleteCluster", - google_dot_cloud_dot_dataproc__v1_dot_proto_dot_clusters__pb2.DeleteClusterRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetCluster( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1.ClusterController/GetCluster", - google_dot_cloud_dot_dataproc__v1_dot_proto_dot_clusters__pb2.GetClusterRequest.SerializeToString, - google_dot_cloud_dot_dataproc__v1_dot_proto_dot_clusters__pb2.Cluster.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ListClusters( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1.ClusterController/ListClusters", - google_dot_cloud_dot_dataproc__v1_dot_proto_dot_clusters__pb2.ListClustersRequest.SerializeToString, - google_dot_cloud_dot_dataproc__v1_dot_proto_dot_clusters__pb2.ListClustersResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DiagnoseCluster( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - 
target, - "/google.cloud.dataproc.v1.ClusterController/DiagnoseCluster", - google_dot_cloud_dot_dataproc__v1_dot_proto_dot_clusters__pb2.DiagnoseClusterRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) diff --git a/google/cloud/dataproc_v1/proto/jobs_pb2.py b/google/cloud/dataproc_v1/proto/jobs_pb2.py deleted file mode 100644 index 7e26f79d..00000000 --- a/google/cloud/dataproc_v1/proto/jobs_pb2.py +++ /dev/null @@ -1,5403 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/dataproc_v1/proto/jobs.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/dataproc_v1/proto/jobs.proto", - package="google.cloud.dataproc.v1", - syntax="proto3", - serialized_options=b"\n\034com.google.cloud.dataproc.v1B\tJobsProtoP\001Z@google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n)google/cloud/dataproc_v1/proto/jobs.proto\x12\x18google.cloud.dataproc.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xc1\x02\n\rLoggingConfig\x12W\n\x11\x64river_log_levels\x18\x02 \x03(\x0b\x32<.google.cloud.dataproc.v1.LoggingConfig.DriverLogLevelsEntry\x1a\x65\n\x14\x44riverLogLevelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12<\n\x05value\x18\x02 \x01(\x0e\x32-.google.cloud.dataproc.v1.LoggingConfig.Level:\x02\x38\x01"p\n\x05Level\x12\x15\n\x11LEVEL_UNSPECIFIED\x10\x00\x12\x07\n\x03\x41LL\x10\x01\x12\t\n\x05TRACE\x10\x02\x12\t\n\x05\x44\x45\x42UG\x10\x03\x12\x08\n\x04INFO\x10\x04\x12\x08\n\x04WARN\x10\x05\x12\t\n\x05\x45RROR\x10\x06\x12\t\n\x05\x46\x41TAL\x10\x07\x12\x07\n\x03OFF\x10\x08"\xf1\x02\n\tHadoopJob\x12\x1b\n\x11main_jar_file_uri\x18\x01 \x01(\tH\x00\x12\x14\n\nmain_class\x18\x02 \x01(\tH\x00\x12\x11\n\x04\x61rgs\x18\x03 \x03(\tB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x04 \x03(\tB\x03\xe0\x41\x01\x12\x16\n\tfile_uris\x18\x05 \x03(\tB\x03\xe0\x41\x01\x12\x19\n\x0c\x61rchive_uris\x18\x06 \x03(\tB\x03\xe0\x41\x01\x12L\n\nproperties\x18\x07 \x03(\x0b\x32\x33.google.cloud.dataproc.v1.HadoopJob.PropertiesEntryB\x03\xe0\x41\x01\x12\x44\n\x0elogging_config\x18\x08 \x01(\x0b\x32\'.google.cloud.dataproc.v1.LoggingConfigB\x03\xe0\x41\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 
\x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x08\n\x06\x64river"\xef\x02\n\x08SparkJob\x12\x1b\n\x11main_jar_file_uri\x18\x01 \x01(\tH\x00\x12\x14\n\nmain_class\x18\x02 \x01(\tH\x00\x12\x11\n\x04\x61rgs\x18\x03 \x03(\tB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x04 \x03(\tB\x03\xe0\x41\x01\x12\x16\n\tfile_uris\x18\x05 \x03(\tB\x03\xe0\x41\x01\x12\x19\n\x0c\x61rchive_uris\x18\x06 \x03(\tB\x03\xe0\x41\x01\x12K\n\nproperties\x18\x07 \x03(\x0b\x32\x32.google.cloud.dataproc.v1.SparkJob.PropertiesEntryB\x03\xe0\x41\x01\x12\x44\n\x0elogging_config\x18\x08 \x01(\x0b\x32\'.google.cloud.dataproc.v1.LoggingConfigB\x03\xe0\x41\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x08\n\x06\x64river"\xf8\x02\n\nPySparkJob\x12!\n\x14main_python_file_uri\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x11\n\x04\x61rgs\x18\x02 \x03(\tB\x03\xe0\x41\x01\x12\x1d\n\x10python_file_uris\x18\x03 \x03(\tB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x04 \x03(\tB\x03\xe0\x41\x01\x12\x16\n\tfile_uris\x18\x05 \x03(\tB\x03\xe0\x41\x01\x12\x19\n\x0c\x61rchive_uris\x18\x06 \x03(\tB\x03\xe0\x41\x01\x12M\n\nproperties\x18\x07 \x03(\x0b\x32\x34.google.cloud.dataproc.v1.PySparkJob.PropertiesEntryB\x03\xe0\x41\x01\x12\x44\n\x0elogging_config\x18\x08 \x01(\x0b\x32\'.google.cloud.dataproc.v1.LoggingConfigB\x03\xe0\x41\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"!\n\tQueryList\x12\x14\n\x07queries\x18\x01 \x03(\tB\x03\xe0\x41\x02"\xb5\x03\n\x07HiveJob\x12\x18\n\x0equery_file_uri\x18\x01 \x01(\tH\x00\x12\x39\n\nquery_list\x18\x02 \x01(\x0b\x32#.google.cloud.dataproc.v1.QueryListH\x00\x12 \n\x13\x63ontinue_on_failure\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12U\n\x10script_variables\x18\x04 \x03(\x0b\x32\x36.google.cloud.dataproc.v1.HiveJob.ScriptVariablesEntryB\x03\xe0\x41\x01\x12J\n\nproperties\x18\x05 \x03(\x0b\x32\x31.google.cloud.dataproc.v1.HiveJob.PropertiesEntryB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x06 \x03(\tB\x03\xe0\x41\x01\x1a\x36\n\x14ScriptVariablesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\t\n\x07queries"\xe5\x03\n\x0bSparkSqlJob\x12\x18\n\x0equery_file_uri\x18\x01 \x01(\tH\x00\x12\x39\n\nquery_list\x18\x02 \x01(\x0b\x32#.google.cloud.dataproc.v1.QueryListH\x00\x12Y\n\x10script_variables\x18\x03 \x03(\x0b\x32:.google.cloud.dataproc.v1.SparkSqlJob.ScriptVariablesEntryB\x03\xe0\x41\x01\x12N\n\nproperties\x18\x04 \x03(\x0b\x32\x35.google.cloud.dataproc.v1.SparkSqlJob.PropertiesEntryB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x38 \x03(\tB\x03\xe0\x41\x01\x12\x44\n\x0elogging_config\x18\x06 \x01(\x0b\x32\'.google.cloud.dataproc.v1.LoggingConfigB\x03\xe0\x41\x01\x1a\x36\n\x14ScriptVariablesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\t\n\x07queries"\xf8\x03\n\x06PigJob\x12\x18\n\x0equery_file_uri\x18\x01 \x01(\tH\x00\x12\x39\n\nquery_list\x18\x02 \x01(\x0b\x32#.google.cloud.dataproc.v1.QueryListH\x00\x12 \n\x13\x63ontinue_on_failure\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12T\n\x10script_variables\x18\x04 \x03(\x0b\x32\x35.google.cloud.dataproc.v1.PigJob.ScriptVariablesEntryB\x03\xe0\x41\x01\x12I\n\nproperties\x18\x05 
\x03(\x0b\x32\x30.google.cloud.dataproc.v1.PigJob.PropertiesEntryB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x06 \x03(\tB\x03\xe0\x41\x01\x12\x44\n\x0elogging_config\x18\x07 \x01(\x0b\x32\'.google.cloud.dataproc.v1.LoggingConfigB\x03\xe0\x41\x01\x1a\x36\n\x14ScriptVariablesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\t\n\x07queries"\xb6\x02\n\tSparkRJob\x12\x1c\n\x0fmain_r_file_uri\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x11\n\x04\x61rgs\x18\x02 \x03(\tB\x03\xe0\x41\x01\x12\x16\n\tfile_uris\x18\x03 \x03(\tB\x03\xe0\x41\x01\x12\x19\n\x0c\x61rchive_uris\x18\x04 \x03(\tB\x03\xe0\x41\x01\x12L\n\nproperties\x18\x05 \x03(\x0b\x32\x33.google.cloud.dataproc.v1.SparkRJob.PropertiesEntryB\x03\xe0\x41\x01\x12\x44\n\x0elogging_config\x18\x06 \x01(\x0b\x32\'.google.cloud.dataproc.v1.LoggingConfigB\x03\xe0\x41\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\x8a\x03\n\tPrestoJob\x12\x18\n\x0equery_file_uri\x18\x01 \x01(\tH\x00\x12\x39\n\nquery_list\x18\x02 \x01(\x0b\x32#.google.cloud.dataproc.v1.QueryListH\x00\x12 \n\x13\x63ontinue_on_failure\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12\x1a\n\routput_format\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0b\x63lient_tags\x18\x05 \x03(\tB\x03\xe0\x41\x01\x12L\n\nproperties\x18\x06 \x03(\x0b\x32\x33.google.cloud.dataproc.v1.PrestoJob.PropertiesEntryB\x03\xe0\x41\x01\x12\x44\n\x0elogging_config\x18\x07 \x01(\x0b\x32\'.google.cloud.dataproc.v1.LoggingConfigB\x03\xe0\x41\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\t\n\x07queries"D\n\x0cJobPlacement\x12\x19\n\x0c\x63luster_name\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_uuid\x18\x02 \x01(\tB\x03\xe0\x41\x03"\xd9\x03\n\tJobStatus\x12=\n\x05state\x18\x01 \x01(\x0e\x32).google.cloud.dataproc.v1.JobStatus.StateB\x03\xe0\x41\x03\x12\x17\n\x07\x64\x65tails\x18\x02 \x01(\tB\x06\xe0\x41\x03\xe0\x41\x01\x12\x39\n\x10state_start_time\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x43\n\x08substate\x18\x07 \x01(\x0e\x32,.google.cloud.dataproc.v1.JobStatus.SubstateB\x03\xe0\x41\x03"\xa9\x01\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0b\n\x07PENDING\x10\x01\x12\x0e\n\nSETUP_DONE\x10\x08\x12\x0b\n\x07RUNNING\x10\x02\x12\x12\n\x0e\x43\x41NCEL_PENDING\x10\x03\x12\x12\n\x0e\x43\x41NCEL_STARTED\x10\x07\x12\r\n\tCANCELLED\x10\x04\x12\x08\n\x04\x44ONE\x10\x05\x12\t\n\x05\x45RROR\x10\x06\x12\x13\n\x0f\x41TTEMPT_FAILURE\x10\t"H\n\x08Substate\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\r\n\tSUBMITTED\x10\x01\x12\n\n\x06QUEUED\x10\x02\x12\x10\n\x0cSTALE_STATUS\x10\x03"<\n\x0cJobReference\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x01\x12\x13\n\x06job_id\x18\x02 \x01(\tB\x03\xe0\x41\x01"\xa5\x02\n\x0fYarnApplication\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x43\n\x05state\x18\x02 \x01(\x0e\x32/.google.cloud.dataproc.v1.YarnApplication.StateB\x03\xe0\x41\x02\x12\x15\n\x08progress\x18\x03 \x01(\x02\x42\x03\xe0\x41\x02\x12\x19\n\x0ctracking_url\x18\x04 
\x01(\tB\x03\xe0\x41\x01"\x87\x01\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x07\n\x03NEW\x10\x01\x12\x0e\n\nNEW_SAVING\x10\x02\x12\r\n\tSUBMITTED\x10\x03\x12\x0c\n\x08\x41\x43\x43\x45PTED\x10\x04\x12\x0b\n\x07RUNNING\x10\x05\x12\x0c\n\x08\x46INISHED\x10\x06\x12\n\n\x06\x46\x41ILED\x10\x07\x12\n\n\x06KILLED\x10\x08"\xff\x08\n\x03Job\x12>\n\treference\x18\x01 \x01(\x0b\x32&.google.cloud.dataproc.v1.JobReferenceB\x03\xe0\x41\x01\x12>\n\tplacement\x18\x02 \x01(\x0b\x32&.google.cloud.dataproc.v1.JobPlacementB\x03\xe0\x41\x02\x12>\n\nhadoop_job\x18\x03 \x01(\x0b\x32#.google.cloud.dataproc.v1.HadoopJobB\x03\xe0\x41\x01H\x00\x12<\n\tspark_job\x18\x04 \x01(\x0b\x32".google.cloud.dataproc.v1.SparkJobB\x03\xe0\x41\x01H\x00\x12@\n\x0bpyspark_job\x18\x05 \x01(\x0b\x32$.google.cloud.dataproc.v1.PySparkJobB\x03\xe0\x41\x01H\x00\x12:\n\x08hive_job\x18\x06 \x01(\x0b\x32!.google.cloud.dataproc.v1.HiveJobB\x03\xe0\x41\x01H\x00\x12\x38\n\x07pig_job\x18\x07 \x01(\x0b\x32 .google.cloud.dataproc.v1.PigJobB\x03\xe0\x41\x01H\x00\x12?\n\x0bspark_r_job\x18\x15 \x01(\x0b\x32#.google.cloud.dataproc.v1.SparkRJobB\x03\xe0\x41\x01H\x00\x12\x43\n\rspark_sql_job\x18\x0c \x01(\x0b\x32%.google.cloud.dataproc.v1.SparkSqlJobB\x03\xe0\x41\x01H\x00\x12>\n\npresto_job\x18\x17 \x01(\x0b\x32#.google.cloud.dataproc.v1.PrestoJobB\x03\xe0\x41\x01H\x00\x12\x38\n\x06status\x18\x08 \x01(\x0b\x32#.google.cloud.dataproc.v1.JobStatusB\x03\xe0\x41\x03\x12@\n\x0estatus_history\x18\r \x03(\x0b\x32#.google.cloud.dataproc.v1.JobStatusB\x03\xe0\x41\x03\x12I\n\x11yarn_applications\x18\t \x03(\x0b\x32).google.cloud.dataproc.v1.YarnApplicationB\x03\xe0\x41\x03\x12\'\n\x1a\x64river_output_resource_uri\x18\x11 \x01(\tB\x03\xe0\x41\x03\x12%\n\x18\x64river_control_files_uri\x18\x0f \x01(\tB\x03\xe0\x41\x03\x12>\n\x06labels\x18\x12 \x03(\x0b\x32).google.cloud.dataproc.v1.Job.LabelsEntryB\x03\xe0\x41\x01\x12@\n\nscheduling\x18\x14 \x01(\x0b\x32\'.google.cloud.dataproc.v1.JobSchedulingB\x03\xe0\x41\x01\x12\x15\n\x08job_uuid\x18\x16 \x01(\tB\x03\xe0\x41\x03\x12\x11\n\x04\x64one\x18\x18 \x01(\x08\x42\x03\xe0\x41\x03\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\n\n\x08type_job"3\n\rJobScheduling\x12"\n\x15max_failures_per_hour\x18\x01 \x01(\x05\x42\x03\xe0\x41\x01"\x8a\x01\n\x10SubmitJobRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12/\n\x03job\x18\x02 \x01(\x0b\x32\x1d.google.cloud.dataproc.v1.JobB\x03\xe0\x41\x02\x12\x17\n\nrequest_id\x18\x04 \x01(\tB\x03\xe0\x41\x01"\xae\x01\n\x0bJobMetadata\x12\x13\n\x06job_id\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x38\n\x06status\x18\x02 \x01(\x0b\x32#.google.cloud.dataproc.v1.JobStatusB\x03\xe0\x41\x03\x12\x1b\n\x0eoperation_type\x18\x03 \x01(\tB\x03\xe0\x41\x03\x12\x33\n\nstart_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03"R\n\rGetJobRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06job_id\x18\x02 \x01(\tB\x03\xe0\x41\x02"\xb3\x02\n\x0fListJobsRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x06 \x01(\tB\x03\xe0\x41\x02\x12\x16\n\tpage_size\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x17\n\npage_token\x18\x03 \x01(\tB\x03\xe0\x41\x01\x12\x19\n\x0c\x63luster_name\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12Y\n\x11job_state_matcher\x18\x05 
\x01(\x0e\x32\x39.google.cloud.dataproc.v1.ListJobsRequest.JobStateMatcherB\x03\xe0\x41\x01\x12\x13\n\x06\x66ilter\x18\x07 \x01(\tB\x03\xe0\x41\x01"6\n\x0fJobStateMatcher\x12\x07\n\x03\x41LL\x10\x00\x12\n\n\x06\x41\x43TIVE\x10\x01\x12\x0e\n\nNON_ACTIVE\x10\x02"\xbc\x01\n\x10UpdateJobRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06job_id\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12/\n\x03job\x18\x04 \x01(\x0b\x32\x1d.google.cloud.dataproc.v1.JobB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"b\n\x10ListJobsResponse\x12\x30\n\x04jobs\x18\x01 \x03(\x0b\x32\x1d.google.cloud.dataproc.v1.JobB\x03\xe0\x41\x03\x12\x1c\n\x0fnext_page_token\x18\x02 \x01(\tB\x03\xe0\x41\x01"U\n\x10\x43\x61ncelJobRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06job_id\x18\x02 \x01(\tB\x03\xe0\x41\x02"U\n\x10\x44\x65leteJobRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06job_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x32\xfc\n\n\rJobController\x12\xb1\x01\n\tSubmitJob\x12*.google.cloud.dataproc.v1.SubmitJobRequest\x1a\x1d.google.cloud.dataproc.v1.Job"Y\x82\xd3\xe4\x93\x02;"6/v1/projects/{project_id}/regions/{region}/jobs:submit:\x01*\xda\x41\x15project_id,region,job\x12\xde\x01\n\x14SubmitJobAsOperation\x12*.google.cloud.dataproc.v1.SubmitJobRequest\x1a\x1d.google.longrunning.Operation"{\x82\xd3\xe4\x93\x02\x46"A/v1/projects/{project_id}/regions/{region}/jobs:submitAsOperation:\x01*\xda\x41\x17project_id, region, job\xca\x41\x12\n\x03Job\x12\x0bJobMetadata\x12\xad\x01\n\x06GetJob\x12\'.google.cloud.dataproc.v1.GetJobRequest\x1a\x1d.google.cloud.dataproc.v1.Job"[\x82\xd3\xe4\x93\x02:\x12\x38/v1/projects/{project_id}/regions/{region}/jobs/{job_id}\xda\x41\x18project_id,region,job_id\x12\xc9\x01\n\x08ListJobs\x12).google.cloud.dataproc.v1.ListJobsRequest\x1a*.google.cloud.dataproc.v1.ListJobsResponse"f\x82\xd3\xe4\x93\x02\x31\x12//v1/projects/{project_id}/regions/{region}/jobs\xda\x41\x11project_id,region\xda\x41\x18project_id,region,filter\x12\x9d\x01\n\tUpdateJob\x12*.google.cloud.dataproc.v1.UpdateJobRequest\x1a\x1d.google.cloud.dataproc.v1.Job"E\x82\xd3\xe4\x93\x02?28/v1/projects/{project_id}/regions/{region}/jobs/{job_id}:\x03job\x12\xbd\x01\n\tCancelJob\x12*.google.cloud.dataproc.v1.CancelJobRequest\x1a\x1d.google.cloud.dataproc.v1.Job"e\x82\xd3\xe4\x93\x02\x44"?/v1/projects/{project_id}/regions/{region}/jobs/{job_id}:cancel:\x01*\xda\x41\x18project_id,region,job_id\x12\xac\x01\n\tDeleteJob\x12*.google.cloud.dataproc.v1.DeleteJobRequest\x1a\x16.google.protobuf.Empty"[\x82\xd3\xe4\x93\x02:*8/v1/projects/{project_id}/regions/{region}/jobs/{job_id}\xda\x41\x18project_id,region,job_id\x1aK\xca\x41\x17\x64\x61taproc.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformBm\n\x1c\x63om.google.cloud.dataproc.v1B\tJobsProtoP\x01Z@google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataprocb\x06proto3', - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, - google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, - google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - ], -) - - -_LOGGINGCONFIG_LEVEL = 
_descriptor.EnumDescriptor( - name="Level", - full_name="google.cloud.dataproc.v1.LoggingConfig.Level", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="LEVEL_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="ALL", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="TRACE", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="DEBUG", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="INFO", - index=4, - number=4, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="WARN", - index=5, - number=5, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="ERROR", - index=6, - number=6, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="FATAL", - index=7, - number=7, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="OFF", - index=8, - number=8, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=502, - serialized_end=614, -) -_sym_db.RegisterEnumDescriptor(_LOGGINGCONFIG_LEVEL) - -_JOBSTATUS_STATE = _descriptor.EnumDescriptor( - name="State", - full_name="google.cloud.dataproc.v1.JobStatus.State", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="STATE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="PENDING", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="SETUP_DONE", - index=2, - number=8, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="RUNNING", - index=3, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="CANCEL_PENDING", - index=4, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="CANCEL_STARTED", - index=5, - number=7, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="CANCELLED", - index=6, - number=4, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="DONE", - index=7, - number=5, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="ERROR", - index=8, - number=6, - serialized_options=None, - 
type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="ATTEMPT_FAILURE", - index=9, - number=9, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=4218, - serialized_end=4387, -) -_sym_db.RegisterEnumDescriptor(_JOBSTATUS_STATE) - -_JOBSTATUS_SUBSTATE = _descriptor.EnumDescriptor( - name="Substate", - full_name="google.cloud.dataproc.v1.JobStatus.Substate", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="SUBMITTED", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="QUEUED", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="STALE_STATUS", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=4389, - serialized_end=4461, -) -_sym_db.RegisterEnumDescriptor(_JOBSTATUS_SUBSTATE) - -_YARNAPPLICATION_STATE = _descriptor.EnumDescriptor( - name="State", - full_name="google.cloud.dataproc.v1.YarnApplication.State", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="STATE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="NEW", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="NEW_SAVING", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="SUBMITTED", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="ACCEPTED", - index=4, - number=4, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="RUNNING", - index=5, - number=5, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="FINISHED", - index=6, - number=6, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="FAILED", - index=7, - number=7, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="KILLED", - index=8, - number=8, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=4684, - serialized_end=4819, -) -_sym_db.RegisterEnumDescriptor(_YARNAPPLICATION_STATE) - -_LISTJOBSREQUEST_JOBSTATEMATCHER = _descriptor.EnumDescriptor( - name="JobStateMatcher", - full_name="google.cloud.dataproc.v1.ListJobsRequest.JobStateMatcher", - 
filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="ALL", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="ACTIVE", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="NON_ACTIVE", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=6684, - serialized_end=6738, -) -_sym_db.RegisterEnumDescriptor(_LISTJOBSREQUEST_JOBSTATEMATCHER) - - -_LOGGINGCONFIG_DRIVERLOGLEVELSENTRY = _descriptor.Descriptor( - name="DriverLogLevelsEntry", - full_name="google.cloud.dataproc.v1.LoggingConfig.DriverLogLevelsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1.LoggingConfig.DriverLogLevelsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1.LoggingConfig.DriverLogLevelsEntry.value", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=399, - serialized_end=500, -) - -_LOGGINGCONFIG = _descriptor.Descriptor( - name="LoggingConfig", - full_name="google.cloud.dataproc.v1.LoggingConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="driver_log_levels", - full_name="google.cloud.dataproc.v1.LoggingConfig.driver_log_levels", - index=0, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_LOGGINGCONFIG_DRIVERLOGLEVELSENTRY,], - enum_types=[_LOGGINGCONFIG_LEVEL,], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=293, - serialized_end=614, -) - - -_HADOOPJOB_PROPERTIESENTRY = _descriptor.Descriptor( - name="PropertiesEntry", - full_name="google.cloud.dataproc.v1.HadoopJob.PropertiesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1.HadoopJob.PropertiesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - 
label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1.HadoopJob.PropertiesEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=927, - serialized_end=976, -) - -_HADOOPJOB = _descriptor.Descriptor( - name="HadoopJob", - full_name="google.cloud.dataproc.v1.HadoopJob", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="main_jar_file_uri", - full_name="google.cloud.dataproc.v1.HadoopJob.main_jar_file_uri", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="main_class", - full_name="google.cloud.dataproc.v1.HadoopJob.main_class", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="args", - full_name="google.cloud.dataproc.v1.HadoopJob.args", - index=2, - number=3, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="jar_file_uris", - full_name="google.cloud.dataproc.v1.HadoopJob.jar_file_uris", - index=3, - number=4, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="file_uris", - full_name="google.cloud.dataproc.v1.HadoopJob.file_uris", - index=4, - number=5, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="archive_uris", - full_name="google.cloud.dataproc.v1.HadoopJob.archive_uris", - index=5, - number=6, - type=9, - cpp_type=9, - label=3, - 
has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="properties", - full_name="google.cloud.dataproc.v1.HadoopJob.properties", - index=6, - number=7, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="logging_config", - full_name="google.cloud.dataproc.v1.HadoopJob.logging_config", - index=7, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_HADOOPJOB_PROPERTIESENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="driver", - full_name="google.cloud.dataproc.v1.HadoopJob.driver", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=617, - serialized_end=986, -) - - -_SPARKJOB_PROPERTIESENTRY = _descriptor.Descriptor( - name="PropertiesEntry", - full_name="google.cloud.dataproc.v1.SparkJob.PropertiesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1.SparkJob.PropertiesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1.SparkJob.PropertiesEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=927, - serialized_end=976, -) - -_SPARKJOB = _descriptor.Descriptor( - name="SparkJob", - full_name="google.cloud.dataproc.v1.SparkJob", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="main_jar_file_uri", - full_name="google.cloud.dataproc.v1.SparkJob.main_jar_file_uri", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="main_class", - full_name="google.cloud.dataproc.v1.SparkJob.main_class", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="args", - full_name="google.cloud.dataproc.v1.SparkJob.args", - index=2, - number=3, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="jar_file_uris", - full_name="google.cloud.dataproc.v1.SparkJob.jar_file_uris", - index=3, - number=4, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="file_uris", - full_name="google.cloud.dataproc.v1.SparkJob.file_uris", - index=4, - number=5, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="archive_uris", - full_name="google.cloud.dataproc.v1.SparkJob.archive_uris", - index=5, - number=6, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="properties", - full_name="google.cloud.dataproc.v1.SparkJob.properties", - index=6, - number=7, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="logging_config", - full_name="google.cloud.dataproc.v1.SparkJob.logging_config", - index=7, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_SPARKJOB_PROPERTIESENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="driver", - full_name="google.cloud.dataproc.v1.SparkJob.driver", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - 
serialized_start=989, - serialized_end=1356, -) - - -_PYSPARKJOB_PROPERTIESENTRY = _descriptor.Descriptor( - name="PropertiesEntry", - full_name="google.cloud.dataproc.v1.PySparkJob.PropertiesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1.PySparkJob.PropertiesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1.PySparkJob.PropertiesEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=927, - serialized_end=976, -) - -_PYSPARKJOB = _descriptor.Descriptor( - name="PySparkJob", - full_name="google.cloud.dataproc.v1.PySparkJob", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="main_python_file_uri", - full_name="google.cloud.dataproc.v1.PySparkJob.main_python_file_uri", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="args", - full_name="google.cloud.dataproc.v1.PySparkJob.args", - index=1, - number=2, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="python_file_uris", - full_name="google.cloud.dataproc.v1.PySparkJob.python_file_uris", - index=2, - number=3, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="jar_file_uris", - full_name="google.cloud.dataproc.v1.PySparkJob.jar_file_uris", - index=3, - number=4, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="file_uris", - full_name="google.cloud.dataproc.v1.PySparkJob.file_uris", - 
index=4, - number=5, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="archive_uris", - full_name="google.cloud.dataproc.v1.PySparkJob.archive_uris", - index=5, - number=6, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="properties", - full_name="google.cloud.dataproc.v1.PySparkJob.properties", - index=6, - number=7, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="logging_config", - full_name="google.cloud.dataproc.v1.PySparkJob.logging_config", - index=7, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_PYSPARKJOB_PROPERTIESENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1359, - serialized_end=1735, -) - - -_QUERYLIST = _descriptor.Descriptor( - name="QueryList", - full_name="google.cloud.dataproc.v1.QueryList", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="queries", - full_name="google.cloud.dataproc.v1.QueryList.queries", - index=0, - number=1, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1737, - serialized_end=1770, -) - - -_HIVEJOB_SCRIPTVARIABLESENTRY = _descriptor.Descriptor( - name="ScriptVariablesEntry", - full_name="google.cloud.dataproc.v1.HiveJob.ScriptVariablesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1.HiveJob.ScriptVariablesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - 
full_name="google.cloud.dataproc.v1.HiveJob.ScriptVariablesEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2094, - serialized_end=2148, -) - -_HIVEJOB_PROPERTIESENTRY = _descriptor.Descriptor( - name="PropertiesEntry", - full_name="google.cloud.dataproc.v1.HiveJob.PropertiesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1.HiveJob.PropertiesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1.HiveJob.PropertiesEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=927, - serialized_end=976, -) - -_HIVEJOB = _descriptor.Descriptor( - name="HiveJob", - full_name="google.cloud.dataproc.v1.HiveJob", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="query_file_uri", - full_name="google.cloud.dataproc.v1.HiveJob.query_file_uri", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="query_list", - full_name="google.cloud.dataproc.v1.HiveJob.query_list", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="continue_on_failure", - full_name="google.cloud.dataproc.v1.HiveJob.continue_on_failure", - index=2, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - 
_descriptor.FieldDescriptor( - name="script_variables", - full_name="google.cloud.dataproc.v1.HiveJob.script_variables", - index=3, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="properties", - full_name="google.cloud.dataproc.v1.HiveJob.properties", - index=4, - number=5, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="jar_file_uris", - full_name="google.cloud.dataproc.v1.HiveJob.jar_file_uris", - index=5, - number=6, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_HIVEJOB_SCRIPTVARIABLESENTRY, _HIVEJOB_PROPERTIESENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="queries", - full_name="google.cloud.dataproc.v1.HiveJob.queries", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=1773, - serialized_end=2210, -) - - -_SPARKSQLJOB_SCRIPTVARIABLESENTRY = _descriptor.Descriptor( - name="ScriptVariablesEntry", - full_name="google.cloud.dataproc.v1.SparkSqlJob.ScriptVariablesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1.SparkSqlJob.ScriptVariablesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1.SparkSqlJob.ScriptVariablesEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2094, - serialized_end=2148, -) - -_SPARKSQLJOB_PROPERTIESENTRY = _descriptor.Descriptor( - name="PropertiesEntry", - full_name="google.cloud.dataproc.v1.SparkSqlJob.PropertiesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - 
full_name="google.cloud.dataproc.v1.SparkSqlJob.PropertiesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1.SparkSqlJob.PropertiesEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=927, - serialized_end=976, -) - -_SPARKSQLJOB = _descriptor.Descriptor( - name="SparkSqlJob", - full_name="google.cloud.dataproc.v1.SparkSqlJob", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="query_file_uri", - full_name="google.cloud.dataproc.v1.SparkSqlJob.query_file_uri", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="query_list", - full_name="google.cloud.dataproc.v1.SparkSqlJob.query_list", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="script_variables", - full_name="google.cloud.dataproc.v1.SparkSqlJob.script_variables", - index=2, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="properties", - full_name="google.cloud.dataproc.v1.SparkSqlJob.properties", - index=3, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="jar_file_uris", - full_name="google.cloud.dataproc.v1.SparkSqlJob.jar_file_uris", - index=4, - number=56, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - 
name="logging_config", - full_name="google.cloud.dataproc.v1.SparkSqlJob.logging_config", - index=5, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_SPARKSQLJOB_SCRIPTVARIABLESENTRY, _SPARKSQLJOB_PROPERTIESENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="queries", - full_name="google.cloud.dataproc.v1.SparkSqlJob.queries", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=2213, - serialized_end=2698, -) - - -_PIGJOB_SCRIPTVARIABLESENTRY = _descriptor.Descriptor( - name="ScriptVariablesEntry", - full_name="google.cloud.dataproc.v1.PigJob.ScriptVariablesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1.PigJob.ScriptVariablesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1.PigJob.ScriptVariablesEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2094, - serialized_end=2148, -) - -_PIGJOB_PROPERTIESENTRY = _descriptor.Descriptor( - name="PropertiesEntry", - full_name="google.cloud.dataproc.v1.PigJob.PropertiesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1.PigJob.PropertiesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1.PigJob.PropertiesEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - 
syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=927, - serialized_end=976, -) - -_PIGJOB = _descriptor.Descriptor( - name="PigJob", - full_name="google.cloud.dataproc.v1.PigJob", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="query_file_uri", - full_name="google.cloud.dataproc.v1.PigJob.query_file_uri", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="query_list", - full_name="google.cloud.dataproc.v1.PigJob.query_list", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="continue_on_failure", - full_name="google.cloud.dataproc.v1.PigJob.continue_on_failure", - index=2, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="script_variables", - full_name="google.cloud.dataproc.v1.PigJob.script_variables", - index=3, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="properties", - full_name="google.cloud.dataproc.v1.PigJob.properties", - index=4, - number=5, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="jar_file_uris", - full_name="google.cloud.dataproc.v1.PigJob.jar_file_uris", - index=5, - number=6, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="logging_config", - full_name="google.cloud.dataproc.v1.PigJob.logging_config", - index=6, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_PIGJOB_SCRIPTVARIABLESENTRY, _PIGJOB_PROPERTIESENTRY,], - enum_types=[], - serialized_options=None, - 
is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="queries", - full_name="google.cloud.dataproc.v1.PigJob.queries", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=2701, - serialized_end=3205, -) - - -_SPARKRJOB_PROPERTIESENTRY = _descriptor.Descriptor( - name="PropertiesEntry", - full_name="google.cloud.dataproc.v1.SparkRJob.PropertiesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1.SparkRJob.PropertiesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1.SparkRJob.PropertiesEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=927, - serialized_end=976, -) - -_SPARKRJOB = _descriptor.Descriptor( - name="SparkRJob", - full_name="google.cloud.dataproc.v1.SparkRJob", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="main_r_file_uri", - full_name="google.cloud.dataproc.v1.SparkRJob.main_r_file_uri", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="args", - full_name="google.cloud.dataproc.v1.SparkRJob.args", - index=1, - number=2, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="file_uris", - full_name="google.cloud.dataproc.v1.SparkRJob.file_uris", - index=2, - number=3, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="archive_uris", - full_name="google.cloud.dataproc.v1.SparkRJob.archive_uris", - index=3, - number=4, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="properties", - full_name="google.cloud.dataproc.v1.SparkRJob.properties", - index=4, - number=5, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="logging_config", - full_name="google.cloud.dataproc.v1.SparkRJob.logging_config", - index=5, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_SPARKRJOB_PROPERTIESENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3208, - serialized_end=3518, -) - - -_PRESTOJOB_PROPERTIESENTRY = _descriptor.Descriptor( - name="PropertiesEntry", - full_name="google.cloud.dataproc.v1.PrestoJob.PropertiesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1.PrestoJob.PropertiesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1.PrestoJob.PropertiesEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=927, - serialized_end=976, -) - -_PRESTOJOB = _descriptor.Descriptor( - name="PrestoJob", - full_name="google.cloud.dataproc.v1.PrestoJob", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="query_file_uri", - full_name="google.cloud.dataproc.v1.PrestoJob.query_file_uri", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="query_list", - full_name="google.cloud.dataproc.v1.PrestoJob.query_list", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - 
message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="continue_on_failure", - full_name="google.cloud.dataproc.v1.PrestoJob.continue_on_failure", - index=2, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="output_format", - full_name="google.cloud.dataproc.v1.PrestoJob.output_format", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="client_tags", - full_name="google.cloud.dataproc.v1.PrestoJob.client_tags", - index=4, - number=5, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="properties", - full_name="google.cloud.dataproc.v1.PrestoJob.properties", - index=5, - number=6, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="logging_config", - full_name="google.cloud.dataproc.v1.PrestoJob.logging_config", - index=6, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_PRESTOJOB_PROPERTIESENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="queries", - full_name="google.cloud.dataproc.v1.PrestoJob.queries", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=3521, - serialized_end=3915, -) - - -_JOBPLACEMENT = _descriptor.Descriptor( - name="JobPlacement", - full_name="google.cloud.dataproc.v1.JobPlacement", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="cluster_name", - full_name="google.cloud.dataproc.v1.JobPlacement.cluster_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - 
create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_uuid", - full_name="google.cloud.dataproc.v1.JobPlacement.cluster_uuid", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3917, - serialized_end=3985, -) - - -_JOBSTATUS = _descriptor.Descriptor( - name="JobStatus", - full_name="google.cloud.dataproc.v1.JobStatus", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="state", - full_name="google.cloud.dataproc.v1.JobStatus.state", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="details", - full_name="google.cloud.dataproc.v1.JobStatus.details", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="state_start_time", - full_name="google.cloud.dataproc.v1.JobStatus.state_start_time", - index=2, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="substate", - full_name="google.cloud.dataproc.v1.JobStatus.substate", - index=3, - number=7, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_JOBSTATUS_STATE, _JOBSTATUS_SUBSTATE,], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3988, - serialized_end=4461, -) - - -_JOBREFERENCE = _descriptor.Descriptor( - name="JobReference", - full_name="google.cloud.dataproc.v1.JobReference", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="project_id", - full_name="google.cloud.dataproc.v1.JobReference.project_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="job_id", - full_name="google.cloud.dataproc.v1.JobReference.job_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4463, - serialized_end=4523, -) - - -_YARNAPPLICATION = _descriptor.Descriptor( - name="YarnApplication", - full_name="google.cloud.dataproc.v1.YarnApplication", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.cloud.dataproc.v1.YarnApplication.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="state", - full_name="google.cloud.dataproc.v1.YarnApplication.state", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="progress", - full_name="google.cloud.dataproc.v1.YarnApplication.progress", - index=2, - number=3, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="tracking_url", - full_name="google.cloud.dataproc.v1.YarnApplication.tracking_url", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_YARNAPPLICATION_STATE,], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4526, - serialized_end=4819, -) - - -_JOB_LABELSENTRY = _descriptor.Descriptor( - name="LabelsEntry", - full_name="google.cloud.dataproc.v1.Job.LabelsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1.Job.LabelsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - 
containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1.Job.LabelsEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=5916, - serialized_end=5961, -) - -_JOB = _descriptor.Descriptor( - name="Job", - full_name="google.cloud.dataproc.v1.Job", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="reference", - full_name="google.cloud.dataproc.v1.Job.reference", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="placement", - full_name="google.cloud.dataproc.v1.Job.placement", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="hadoop_job", - full_name="google.cloud.dataproc.v1.Job.hadoop_job", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="spark_job", - full_name="google.cloud.dataproc.v1.Job.spark_job", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="pyspark_job", - full_name="google.cloud.dataproc.v1.Job.pyspark_job", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="hive_job", - full_name="google.cloud.dataproc.v1.Job.hive_job", - index=5, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - 
file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="pig_job", - full_name="google.cloud.dataproc.v1.Job.pig_job", - index=6, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="spark_r_job", - full_name="google.cloud.dataproc.v1.Job.spark_r_job", - index=7, - number=21, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="spark_sql_job", - full_name="google.cloud.dataproc.v1.Job.spark_sql_job", - index=8, - number=12, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="presto_job", - full_name="google.cloud.dataproc.v1.Job.presto_job", - index=9, - number=23, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="status", - full_name="google.cloud.dataproc.v1.Job.status", - index=10, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="status_history", - full_name="google.cloud.dataproc.v1.Job.status_history", - index=11, - number=13, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="yarn_applications", - full_name="google.cloud.dataproc.v1.Job.yarn_applications", - index=12, - number=9, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="driver_output_resource_uri", - full_name="google.cloud.dataproc.v1.Job.driver_output_resource_uri", - index=13, - number=17, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - 
create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="driver_control_files_uri", - full_name="google.cloud.dataproc.v1.Job.driver_control_files_uri", - index=14, - number=15, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="labels", - full_name="google.cloud.dataproc.v1.Job.labels", - index=15, - number=18, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="scheduling", - full_name="google.cloud.dataproc.v1.Job.scheduling", - index=16, - number=20, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="job_uuid", - full_name="google.cloud.dataproc.v1.Job.job_uuid", - index=17, - number=22, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="done", - full_name="google.cloud.dataproc.v1.Job.done", - index=18, - number=24, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_JOB_LABELSENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="type_job", - full_name="google.cloud.dataproc.v1.Job.type_job", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=4822, - serialized_end=5973, -) - - -_JOBSCHEDULING = _descriptor.Descriptor( - name="JobScheduling", - full_name="google.cloud.dataproc.v1.JobScheduling", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="max_failures_per_hour", - full_name="google.cloud.dataproc.v1.JobScheduling.max_failures_per_hour", - index=0, - number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - 
serialized_start=5975, - serialized_end=6026, -) - - -_SUBMITJOBREQUEST = _descriptor.Descriptor( - name="SubmitJobRequest", - full_name="google.cloud.dataproc.v1.SubmitJobRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="project_id", - full_name="google.cloud.dataproc.v1.SubmitJobRequest.project_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="region", - full_name="google.cloud.dataproc.v1.SubmitJobRequest.region", - index=1, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="job", - full_name="google.cloud.dataproc.v1.SubmitJobRequest.job", - index=2, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="request_id", - full_name="google.cloud.dataproc.v1.SubmitJobRequest.request_id", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=6029, - serialized_end=6167, -) - - -_JOBMETADATA = _descriptor.Descriptor( - name="JobMetadata", - full_name="google.cloud.dataproc.v1.JobMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="job_id", - full_name="google.cloud.dataproc.v1.JobMetadata.job_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="status", - full_name="google.cloud.dataproc.v1.JobMetadata.status", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="operation_type", - full_name="google.cloud.dataproc.v1.JobMetadata.operation_type", - index=2, - 
number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_time", - full_name="google.cloud.dataproc.v1.JobMetadata.start_time", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=6170, - serialized_end=6344, -) - - -_GETJOBREQUEST = _descriptor.Descriptor( - name="GetJobRequest", - full_name="google.cloud.dataproc.v1.GetJobRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="project_id", - full_name="google.cloud.dataproc.v1.GetJobRequest.project_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="region", - full_name="google.cloud.dataproc.v1.GetJobRequest.region", - index=1, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="job_id", - full_name="google.cloud.dataproc.v1.GetJobRequest.job_id", - index=2, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=6346, - serialized_end=6428, -) - - -_LISTJOBSREQUEST = _descriptor.Descriptor( - name="ListJobsRequest", - full_name="google.cloud.dataproc.v1.ListJobsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="project_id", - full_name="google.cloud.dataproc.v1.ListJobsRequest.project_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="region", - 
full_name="google.cloud.dataproc.v1.ListJobsRequest.region", - index=1, - number=6, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.cloud.dataproc.v1.ListJobsRequest.page_size", - index=2, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.cloud.dataproc.v1.ListJobsRequest.page_token", - index=3, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_name", - full_name="google.cloud.dataproc.v1.ListJobsRequest.cluster_name", - index=4, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="job_state_matcher", - full_name="google.cloud.dataproc.v1.ListJobsRequest.job_state_matcher", - index=5, - number=5, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.cloud.dataproc.v1.ListJobsRequest.filter", - index=6, - number=7, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_LISTJOBSREQUEST_JOBSTATEMATCHER,], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=6431, - serialized_end=6738, -) - - -_UPDATEJOBREQUEST = _descriptor.Descriptor( - name="UpdateJobRequest", - full_name="google.cloud.dataproc.v1.UpdateJobRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="project_id", - full_name="google.cloud.dataproc.v1.UpdateJobRequest.project_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, 
- create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="region", - full_name="google.cloud.dataproc.v1.UpdateJobRequest.region", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="job_id", - full_name="google.cloud.dataproc.v1.UpdateJobRequest.job_id", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="job", - full_name="google.cloud.dataproc.v1.UpdateJobRequest.job", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="update_mask", - full_name="google.cloud.dataproc.v1.UpdateJobRequest.update_mask", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=6741, - serialized_end=6929, -) - - -_LISTJOBSRESPONSE = _descriptor.Descriptor( - name="ListJobsResponse", - full_name="google.cloud.dataproc.v1.ListJobsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="jobs", - full_name="google.cloud.dataproc.v1.ListJobsResponse.jobs", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.cloud.dataproc.v1.ListJobsResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=6931, - serialized_end=7029, -) - - -_CANCELJOBREQUEST = _descriptor.Descriptor( - name="CancelJobRequest", - full_name="google.cloud.dataproc.v1.CancelJobRequest", - filename=None, - 
file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="project_id", - full_name="google.cloud.dataproc.v1.CancelJobRequest.project_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="region", - full_name="google.cloud.dataproc.v1.CancelJobRequest.region", - index=1, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="job_id", - full_name="google.cloud.dataproc.v1.CancelJobRequest.job_id", - index=2, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=7031, - serialized_end=7116, -) - - -_DELETEJOBREQUEST = _descriptor.Descriptor( - name="DeleteJobRequest", - full_name="google.cloud.dataproc.v1.DeleteJobRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="project_id", - full_name="google.cloud.dataproc.v1.DeleteJobRequest.project_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="region", - full_name="google.cloud.dataproc.v1.DeleteJobRequest.region", - index=1, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="job_id", - full_name="google.cloud.dataproc.v1.DeleteJobRequest.job_id", - index=2, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=7118, - serialized_end=7203, -) - -_LOGGINGCONFIG_DRIVERLOGLEVELSENTRY.fields_by_name[ - "value" 
-].enum_type = _LOGGINGCONFIG_LEVEL -_LOGGINGCONFIG_DRIVERLOGLEVELSENTRY.containing_type = _LOGGINGCONFIG -_LOGGINGCONFIG.fields_by_name[ - "driver_log_levels" -].message_type = _LOGGINGCONFIG_DRIVERLOGLEVELSENTRY -_LOGGINGCONFIG_LEVEL.containing_type = _LOGGINGCONFIG -_HADOOPJOB_PROPERTIESENTRY.containing_type = _HADOOPJOB -_HADOOPJOB.fields_by_name["properties"].message_type = _HADOOPJOB_PROPERTIESENTRY -_HADOOPJOB.fields_by_name["logging_config"].message_type = _LOGGINGCONFIG -_HADOOPJOB.oneofs_by_name["driver"].fields.append( - _HADOOPJOB.fields_by_name["main_jar_file_uri"] -) -_HADOOPJOB.fields_by_name[ - "main_jar_file_uri" -].containing_oneof = _HADOOPJOB.oneofs_by_name["driver"] -_HADOOPJOB.oneofs_by_name["driver"].fields.append( - _HADOOPJOB.fields_by_name["main_class"] -) -_HADOOPJOB.fields_by_name["main_class"].containing_oneof = _HADOOPJOB.oneofs_by_name[ - "driver" -] -_SPARKJOB_PROPERTIESENTRY.containing_type = _SPARKJOB -_SPARKJOB.fields_by_name["properties"].message_type = _SPARKJOB_PROPERTIESENTRY -_SPARKJOB.fields_by_name["logging_config"].message_type = _LOGGINGCONFIG -_SPARKJOB.oneofs_by_name["driver"].fields.append( - _SPARKJOB.fields_by_name["main_jar_file_uri"] -) -_SPARKJOB.fields_by_name[ - "main_jar_file_uri" -].containing_oneof = _SPARKJOB.oneofs_by_name["driver"] -_SPARKJOB.oneofs_by_name["driver"].fields.append(_SPARKJOB.fields_by_name["main_class"]) -_SPARKJOB.fields_by_name["main_class"].containing_oneof = _SPARKJOB.oneofs_by_name[ - "driver" -] -_PYSPARKJOB_PROPERTIESENTRY.containing_type = _PYSPARKJOB -_PYSPARKJOB.fields_by_name["properties"].message_type = _PYSPARKJOB_PROPERTIESENTRY -_PYSPARKJOB.fields_by_name["logging_config"].message_type = _LOGGINGCONFIG -_HIVEJOB_SCRIPTVARIABLESENTRY.containing_type = _HIVEJOB -_HIVEJOB_PROPERTIESENTRY.containing_type = _HIVEJOB -_HIVEJOB.fields_by_name["query_list"].message_type = _QUERYLIST -_HIVEJOB.fields_by_name["script_variables"].message_type = _HIVEJOB_SCRIPTVARIABLESENTRY -_HIVEJOB.fields_by_name["properties"].message_type = _HIVEJOB_PROPERTIESENTRY -_HIVEJOB.oneofs_by_name["queries"].fields.append( - _HIVEJOB.fields_by_name["query_file_uri"] -) -_HIVEJOB.fields_by_name["query_file_uri"].containing_oneof = _HIVEJOB.oneofs_by_name[ - "queries" -] -_HIVEJOB.oneofs_by_name["queries"].fields.append(_HIVEJOB.fields_by_name["query_list"]) -_HIVEJOB.fields_by_name["query_list"].containing_oneof = _HIVEJOB.oneofs_by_name[ - "queries" -] -_SPARKSQLJOB_SCRIPTVARIABLESENTRY.containing_type = _SPARKSQLJOB -_SPARKSQLJOB_PROPERTIESENTRY.containing_type = _SPARKSQLJOB -_SPARKSQLJOB.fields_by_name["query_list"].message_type = _QUERYLIST -_SPARKSQLJOB.fields_by_name[ - "script_variables" -].message_type = _SPARKSQLJOB_SCRIPTVARIABLESENTRY -_SPARKSQLJOB.fields_by_name["properties"].message_type = _SPARKSQLJOB_PROPERTIESENTRY -_SPARKSQLJOB.fields_by_name["logging_config"].message_type = _LOGGINGCONFIG -_SPARKSQLJOB.oneofs_by_name["queries"].fields.append( - _SPARKSQLJOB.fields_by_name["query_file_uri"] -) -_SPARKSQLJOB.fields_by_name[ - "query_file_uri" -].containing_oneof = _SPARKSQLJOB.oneofs_by_name["queries"] -_SPARKSQLJOB.oneofs_by_name["queries"].fields.append( - _SPARKSQLJOB.fields_by_name["query_list"] -) -_SPARKSQLJOB.fields_by_name[ - "query_list" -].containing_oneof = _SPARKSQLJOB.oneofs_by_name["queries"] -_PIGJOB_SCRIPTVARIABLESENTRY.containing_type = _PIGJOB -_PIGJOB_PROPERTIESENTRY.containing_type = _PIGJOB -_PIGJOB.fields_by_name["query_list"].message_type = _QUERYLIST 
-_PIGJOB.fields_by_name["script_variables"].message_type = _PIGJOB_SCRIPTVARIABLESENTRY -_PIGJOB.fields_by_name["properties"].message_type = _PIGJOB_PROPERTIESENTRY -_PIGJOB.fields_by_name["logging_config"].message_type = _LOGGINGCONFIG -_PIGJOB.oneofs_by_name["queries"].fields.append( - _PIGJOB.fields_by_name["query_file_uri"] -) -_PIGJOB.fields_by_name["query_file_uri"].containing_oneof = _PIGJOB.oneofs_by_name[ - "queries" -] -_PIGJOB.oneofs_by_name["queries"].fields.append(_PIGJOB.fields_by_name["query_list"]) -_PIGJOB.fields_by_name["query_list"].containing_oneof = _PIGJOB.oneofs_by_name[ - "queries" -] -_SPARKRJOB_PROPERTIESENTRY.containing_type = _SPARKRJOB -_SPARKRJOB.fields_by_name["properties"].message_type = _SPARKRJOB_PROPERTIESENTRY -_SPARKRJOB.fields_by_name["logging_config"].message_type = _LOGGINGCONFIG -_PRESTOJOB_PROPERTIESENTRY.containing_type = _PRESTOJOB -_PRESTOJOB.fields_by_name["query_list"].message_type = _QUERYLIST -_PRESTOJOB.fields_by_name["properties"].message_type = _PRESTOJOB_PROPERTIESENTRY -_PRESTOJOB.fields_by_name["logging_config"].message_type = _LOGGINGCONFIG -_PRESTOJOB.oneofs_by_name["queries"].fields.append( - _PRESTOJOB.fields_by_name["query_file_uri"] -) -_PRESTOJOB.fields_by_name[ - "query_file_uri" -].containing_oneof = _PRESTOJOB.oneofs_by_name["queries"] -_PRESTOJOB.oneofs_by_name["queries"].fields.append( - _PRESTOJOB.fields_by_name["query_list"] -) -_PRESTOJOB.fields_by_name["query_list"].containing_oneof = _PRESTOJOB.oneofs_by_name[ - "queries" -] -_JOBSTATUS.fields_by_name["state"].enum_type = _JOBSTATUS_STATE -_JOBSTATUS.fields_by_name[ - "state_start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_JOBSTATUS.fields_by_name["substate"].enum_type = _JOBSTATUS_SUBSTATE -_JOBSTATUS_STATE.containing_type = _JOBSTATUS -_JOBSTATUS_SUBSTATE.containing_type = _JOBSTATUS -_YARNAPPLICATION.fields_by_name["state"].enum_type = _YARNAPPLICATION_STATE -_YARNAPPLICATION_STATE.containing_type = _YARNAPPLICATION -_JOB_LABELSENTRY.containing_type = _JOB -_JOB.fields_by_name["reference"].message_type = _JOBREFERENCE -_JOB.fields_by_name["placement"].message_type = _JOBPLACEMENT -_JOB.fields_by_name["hadoop_job"].message_type = _HADOOPJOB -_JOB.fields_by_name["spark_job"].message_type = _SPARKJOB -_JOB.fields_by_name["pyspark_job"].message_type = _PYSPARKJOB -_JOB.fields_by_name["hive_job"].message_type = _HIVEJOB -_JOB.fields_by_name["pig_job"].message_type = _PIGJOB -_JOB.fields_by_name["spark_r_job"].message_type = _SPARKRJOB -_JOB.fields_by_name["spark_sql_job"].message_type = _SPARKSQLJOB -_JOB.fields_by_name["presto_job"].message_type = _PRESTOJOB -_JOB.fields_by_name["status"].message_type = _JOBSTATUS -_JOB.fields_by_name["status_history"].message_type = _JOBSTATUS -_JOB.fields_by_name["yarn_applications"].message_type = _YARNAPPLICATION -_JOB.fields_by_name["labels"].message_type = _JOB_LABELSENTRY -_JOB.fields_by_name["scheduling"].message_type = _JOBSCHEDULING -_JOB.oneofs_by_name["type_job"].fields.append(_JOB.fields_by_name["hadoop_job"]) -_JOB.fields_by_name["hadoop_job"].containing_oneof = _JOB.oneofs_by_name["type_job"] -_JOB.oneofs_by_name["type_job"].fields.append(_JOB.fields_by_name["spark_job"]) -_JOB.fields_by_name["spark_job"].containing_oneof = _JOB.oneofs_by_name["type_job"] -_JOB.oneofs_by_name["type_job"].fields.append(_JOB.fields_by_name["pyspark_job"]) -_JOB.fields_by_name["pyspark_job"].containing_oneof = _JOB.oneofs_by_name["type_job"] 
-_JOB.oneofs_by_name["type_job"].fields.append(_JOB.fields_by_name["hive_job"]) -_JOB.fields_by_name["hive_job"].containing_oneof = _JOB.oneofs_by_name["type_job"] -_JOB.oneofs_by_name["type_job"].fields.append(_JOB.fields_by_name["pig_job"]) -_JOB.fields_by_name["pig_job"].containing_oneof = _JOB.oneofs_by_name["type_job"] -_JOB.oneofs_by_name["type_job"].fields.append(_JOB.fields_by_name["spark_r_job"]) -_JOB.fields_by_name["spark_r_job"].containing_oneof = _JOB.oneofs_by_name["type_job"] -_JOB.oneofs_by_name["type_job"].fields.append(_JOB.fields_by_name["spark_sql_job"]) -_JOB.fields_by_name["spark_sql_job"].containing_oneof = _JOB.oneofs_by_name["type_job"] -_JOB.oneofs_by_name["type_job"].fields.append(_JOB.fields_by_name["presto_job"]) -_JOB.fields_by_name["presto_job"].containing_oneof = _JOB.oneofs_by_name["type_job"] -_SUBMITJOBREQUEST.fields_by_name["job"].message_type = _JOB -_JOBMETADATA.fields_by_name["status"].message_type = _JOBSTATUS -_JOBMETADATA.fields_by_name[ - "start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_LISTJOBSREQUEST.fields_by_name[ - "job_state_matcher" -].enum_type = _LISTJOBSREQUEST_JOBSTATEMATCHER -_LISTJOBSREQUEST_JOBSTATEMATCHER.containing_type = _LISTJOBSREQUEST -_UPDATEJOBREQUEST.fields_by_name["job"].message_type = _JOB -_UPDATEJOBREQUEST.fields_by_name[ - "update_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_LISTJOBSRESPONSE.fields_by_name["jobs"].message_type = _JOB -DESCRIPTOR.message_types_by_name["LoggingConfig"] = _LOGGINGCONFIG -DESCRIPTOR.message_types_by_name["HadoopJob"] = _HADOOPJOB -DESCRIPTOR.message_types_by_name["SparkJob"] = _SPARKJOB -DESCRIPTOR.message_types_by_name["PySparkJob"] = _PYSPARKJOB -DESCRIPTOR.message_types_by_name["QueryList"] = _QUERYLIST -DESCRIPTOR.message_types_by_name["HiveJob"] = _HIVEJOB -DESCRIPTOR.message_types_by_name["SparkSqlJob"] = _SPARKSQLJOB -DESCRIPTOR.message_types_by_name["PigJob"] = _PIGJOB -DESCRIPTOR.message_types_by_name["SparkRJob"] = _SPARKRJOB -DESCRIPTOR.message_types_by_name["PrestoJob"] = _PRESTOJOB -DESCRIPTOR.message_types_by_name["JobPlacement"] = _JOBPLACEMENT -DESCRIPTOR.message_types_by_name["JobStatus"] = _JOBSTATUS -DESCRIPTOR.message_types_by_name["JobReference"] = _JOBREFERENCE -DESCRIPTOR.message_types_by_name["YarnApplication"] = _YARNAPPLICATION -DESCRIPTOR.message_types_by_name["Job"] = _JOB -DESCRIPTOR.message_types_by_name["JobScheduling"] = _JOBSCHEDULING -DESCRIPTOR.message_types_by_name["SubmitJobRequest"] = _SUBMITJOBREQUEST -DESCRIPTOR.message_types_by_name["JobMetadata"] = _JOBMETADATA -DESCRIPTOR.message_types_by_name["GetJobRequest"] = _GETJOBREQUEST -DESCRIPTOR.message_types_by_name["ListJobsRequest"] = _LISTJOBSREQUEST -DESCRIPTOR.message_types_by_name["UpdateJobRequest"] = _UPDATEJOBREQUEST -DESCRIPTOR.message_types_by_name["ListJobsResponse"] = _LISTJOBSRESPONSE -DESCRIPTOR.message_types_by_name["CancelJobRequest"] = _CANCELJOBREQUEST -DESCRIPTOR.message_types_by_name["DeleteJobRequest"] = _DELETEJOBREQUEST -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -LoggingConfig = _reflection.GeneratedProtocolMessageType( - "LoggingConfig", - (_message.Message,), - { - "DriverLogLevelsEntry": _reflection.GeneratedProtocolMessageType( - "DriverLogLevelsEntry", - (_message.Message,), - { - "DESCRIPTOR": _LOGGINGCONFIG_DRIVERLOGLEVELSENTRY, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.LoggingConfig.DriverLogLevelsEntry) - }, - ), - 
"DESCRIPTOR": _LOGGINGCONFIG, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2", - "__doc__": """The runtime logging config of the job. - - Attributes: - driver_log_levels: - The per-package log levels for the driver. This may include - “root” package name to configure rootLogger. Examples: - ‘com.google = FATAL’, ‘root = INFO’, ‘org.apache = DEBUG’ - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.LoggingConfig) - }, -) -_sym_db.RegisterMessage(LoggingConfig) -_sym_db.RegisterMessage(LoggingConfig.DriverLogLevelsEntry) - -HadoopJob = _reflection.GeneratedProtocolMessageType( - "HadoopJob", - (_message.Message,), - { - "PropertiesEntry": _reflection.GeneratedProtocolMessageType( - "PropertiesEntry", - (_message.Message,), - { - "DESCRIPTOR": _HADOOPJOB_PROPERTIESENTRY, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.HadoopJob.PropertiesEntry) - }, - ), - "DESCRIPTOR": _HADOOPJOB, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2", - "__doc__": """A Dataproc job for running `Apache Hadoop MapReduce - `__ jobs on - `Apache Hadoop YARN `__. - - Attributes: - driver: - Required. Indicates the location of the driver’s main class. - Specify either the jar file that contains the main class or - the main class name. To specify both, add the jar file to - ``jar_file_uris``, and then specify the main class name in - this property. - main_jar_file_uri: - The HCFS URI of the jar file containing the main class. - Examples: ‘gs://foo-bucket/analytics-binaries/extract-useful- - metrics-mr.jar’ ‘hdfs:/tmp/test-samples/custom-wordcount.jar’ - ‘file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce- - examples.jar’ - main_class: - The name of the driver’s main class. The jar file containing - the class must be in the default CLASSPATH or specified in - ``jar_file_uris``. - args: - Optional. The arguments to pass to the driver. Do not include - arguments, such as ``-libjars`` or ``-Dfoo=bar``, that can be - set as job properties, since a collision may occur that causes - an incorrect job submission. - jar_file_uris: - Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop - driver and tasks. - file_uris: - Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to - be copied to the working directory of Hadoop drivers and - distributed tasks. Useful for naively parallel tasks. - archive_uris: - Optional. HCFS URIs of archives to be extracted in the working - directory of Hadoop drivers and tasks. Supported file types: - .jar, .tar, .tar.gz, .tgz, or .zip. - properties: - Optional. A mapping of property names to values, used to - configure Hadoop. Properties that conflict with values set by - the Dataproc API may be overwritten. Can include properties - set in ``/etc/hadoop/conf/*-site`` and classes in user code. - logging_config: - Optional. The runtime log config for job execution. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.HadoopJob) - }, -) -_sym_db.RegisterMessage(HadoopJob) -_sym_db.RegisterMessage(HadoopJob.PropertiesEntry) - -SparkJob = _reflection.GeneratedProtocolMessageType( - "SparkJob", - (_message.Message,), - { - "PropertiesEntry": _reflection.GeneratedProtocolMessageType( - "PropertiesEntry", - (_message.Message,), - { - "DESCRIPTOR": _SPARKJOB_PROPERTIESENTRY, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.SparkJob.PropertiesEntry) - }, - ), - "DESCRIPTOR": _SPARKJOB, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2", - "__doc__": """A Dataproc job for running `Apache Spark `__ - applications on YARN. - - Attributes: - driver: - Required. The specification of the main method to call to - drive the job. Specify either the jar file that contains the - main class or the main class name. To pass both a main jar and - a main class in that jar, add the jar to - ``CommonJob.jar_file_uris``, and then specify the main class - name in ``main_class``. - main_jar_file_uri: - The HCFS URI of the jar file that contains the main class. - main_class: - The name of the driver’s main class. The jar file that - contains the class must be in the default CLASSPATH or - specified in ``jar_file_uris``. - args: - Optional. The arguments to pass to the driver. Do not include - arguments, such as ``--conf``, that can be set as job - properties, since a collision may occur that causes an - incorrect job submission. - jar_file_uris: - Optional. HCFS URIs of jar files to add to the CLASSPATHs of - the Spark driver and tasks. - file_uris: - Optional. HCFS URIs of files to be placed in the working - directory of each executor. Useful for naively parallel tasks. - archive_uris: - Optional. HCFS URIs of archives to be extracted into the - working directory of each executor. Supported file types: - .jar, .tar, .tar.gz, .tgz, and .zip. - properties: - Optional. A mapping of property names to values, used to - configure Spark. Properties that conflict with values set by - the Dataproc API may be overwritten. Can include properties - set in /etc/spark/conf/spark-defaults.conf and classes in user - code. - logging_config: - Optional. The runtime log config for job execution. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.SparkJob) - }, -) -_sym_db.RegisterMessage(SparkJob) -_sym_db.RegisterMessage(SparkJob.PropertiesEntry) - -PySparkJob = _reflection.GeneratedProtocolMessageType( - "PySparkJob", - (_message.Message,), - { - "PropertiesEntry": _reflection.GeneratedProtocolMessageType( - "PropertiesEntry", - (_message.Message,), - { - "DESCRIPTOR": _PYSPARKJOB_PROPERTIESENTRY, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.PySparkJob.PropertiesEntry) - }, - ), - "DESCRIPTOR": _PYSPARKJOB, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2", - "__doc__": """A Dataproc job for running `Apache PySpark - `__ - applications on YARN. - - Attributes: - main_python_file_uri: - Required. The HCFS URI of the main Python file to use as the - driver. Must be a .py file. - args: - Optional. The arguments to pass to the driver. Do not include - arguments, such as ``--conf``, that can be set as job - properties, since a collision may occur that causes an - incorrect job submission. - python_file_uris: - Optional. HCFS file URIs of Python files to pass to the - PySpark framework. 
Supported file types: .py, .egg, and .zip. - jar_file_uris: - Optional. HCFS URIs of jar files to add to the CLASSPATHs of - the Python driver and tasks. - file_uris: - Optional. HCFS URIs of files to be placed in the working - directory of each executor. Useful for naively parallel tasks. - archive_uris: - Optional. HCFS URIs of archives to be extracted into the - working directory of each executor. Supported file types: - .jar, .tar, .tar.gz, .tgz, and .zip. - properties: - Optional. A mapping of property names to values, used to - configure PySpark. Properties that conflict with values set by - the Dataproc API may be overwritten. Can include properties - set in /etc/spark/conf/spark-defaults.conf and classes in user - code. - logging_config: - Optional. The runtime log config for job execution. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.PySparkJob) - }, -) -_sym_db.RegisterMessage(PySparkJob) -_sym_db.RegisterMessage(PySparkJob.PropertiesEntry) - -QueryList = _reflection.GeneratedProtocolMessageType( - "QueryList", - (_message.Message,), - { - "DESCRIPTOR": _QUERYLIST, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2", - "__doc__": """A list of queries to run on a cluster. - - Attributes: - queries: - Required. The queries to execute. You do not need to terminate - a query with a semicolon. Multiple queries can be specified in - one string by separating each with a semicolon. Here is an - example of an Cloud Dataproc API snippet that uses a QueryList - to specify a HiveJob: “hiveJob”: { “queryList”: { “queries”: - [ “query1”, “query2”, “query3;query4”, ] } } - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.QueryList) - }, -) -_sym_db.RegisterMessage(QueryList) - -HiveJob = _reflection.GeneratedProtocolMessageType( - "HiveJob", - (_message.Message,), - { - "ScriptVariablesEntry": _reflection.GeneratedProtocolMessageType( - "ScriptVariablesEntry", - (_message.Message,), - { - "DESCRIPTOR": _HIVEJOB_SCRIPTVARIABLESENTRY, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.HiveJob.ScriptVariablesEntry) - }, - ), - "PropertiesEntry": _reflection.GeneratedProtocolMessageType( - "PropertiesEntry", - (_message.Message,), - { - "DESCRIPTOR": _HIVEJOB_PROPERTIESENTRY, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.HiveJob.PropertiesEntry) - }, - ), - "DESCRIPTOR": _HIVEJOB, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2", - "__doc__": """A Dataproc job for running `Apache Hive `__ - queries on YARN. - - Attributes: - queries: - Required. The sequence of Hive queries to execute, specified - as either an HCFS file URI or a list of queries. - query_file_uri: - The HCFS URI of the script that contains Hive queries. - query_list: - A list of queries. - continue_on_failure: - Optional. Whether to continue executing queries if a query - fails. The default value is ``false``. Setting to ``true`` can - be useful when executing independent parallel queries. - script_variables: - Optional. Mapping of query variable names to values - (equivalent to the Hive command: ``SET name="value";``). - properties: - Optional. A mapping of property names and values, used to - configure Hive. Properties that conflict with values set by - the Dataproc API may be overwritten. Can include properties - set in ``/etc/hadoop/conf/*-site``.xml, /etc/hive/conf/hive- - site.xml, and classes in user code. 
- jar_file_uris: - Optional. HCFS URIs of jar files to add to the CLASSPATH of - the Hive server and Hadoop MapReduce (MR) tasks. Can contain - Hive SerDes and UDFs. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.HiveJob) - }, -) -_sym_db.RegisterMessage(HiveJob) -_sym_db.RegisterMessage(HiveJob.ScriptVariablesEntry) -_sym_db.RegisterMessage(HiveJob.PropertiesEntry) - -SparkSqlJob = _reflection.GeneratedProtocolMessageType( - "SparkSqlJob", - (_message.Message,), - { - "ScriptVariablesEntry": _reflection.GeneratedProtocolMessageType( - "ScriptVariablesEntry", - (_message.Message,), - { - "DESCRIPTOR": _SPARKSQLJOB_SCRIPTVARIABLESENTRY, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.SparkSqlJob.ScriptVariablesEntry) - }, - ), - "PropertiesEntry": _reflection.GeneratedProtocolMessageType( - "PropertiesEntry", - (_message.Message,), - { - "DESCRIPTOR": _SPARKSQLJOB_PROPERTIESENTRY, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.SparkSqlJob.PropertiesEntry) - }, - ), - "DESCRIPTOR": _SPARKSQLJOB, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2", - "__doc__": """A Dataproc job for running `Apache Spark SQL - `__ queries. - - Attributes: - queries: - Required. The sequence of Spark SQL queries to execute, - specified as either an HCFS file URI or as a list of queries. - query_file_uri: - The HCFS URI of the script that contains SQL queries. - query_list: - A list of queries. - script_variables: - Optional. Mapping of query variable names to values - (equivalent to the Spark SQL command: SET ``name="value";``). - properties: - Optional. A mapping of property names to values, used to - configure Spark SQL’s SparkConf. Properties that conflict with - values set by the Dataproc API may be overwritten. - jar_file_uris: - Optional. HCFS URIs of jar files to be added to the Spark - CLASSPATH. - logging_config: - Optional. The runtime log config for job execution. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.SparkSqlJob) - }, -) -_sym_db.RegisterMessage(SparkSqlJob) -_sym_db.RegisterMessage(SparkSqlJob.ScriptVariablesEntry) -_sym_db.RegisterMessage(SparkSqlJob.PropertiesEntry) - -PigJob = _reflection.GeneratedProtocolMessageType( - "PigJob", - (_message.Message,), - { - "ScriptVariablesEntry": _reflection.GeneratedProtocolMessageType( - "ScriptVariablesEntry", - (_message.Message,), - { - "DESCRIPTOR": _PIGJOB_SCRIPTVARIABLESENTRY, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.PigJob.ScriptVariablesEntry) - }, - ), - "PropertiesEntry": _reflection.GeneratedProtocolMessageType( - "PropertiesEntry", - (_message.Message,), - { - "DESCRIPTOR": _PIGJOB_PROPERTIESENTRY, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.PigJob.PropertiesEntry) - }, - ), - "DESCRIPTOR": _PIGJOB, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2", - "__doc__": """A Dataproc job for running `Apache Pig `__ - queries on YARN. - - Attributes: - queries: - Required. The sequence of Pig queries to execute, specified as - an HCFS file URI or a list of queries. - query_file_uri: - The HCFS URI of the script that contains the Pig queries. - query_list: - A list of queries. - continue_on_failure: - Optional. Whether to continue executing queries if a query - fails. 
The default value is ``false``. Setting to ``true`` can - be useful when executing independent parallel queries. - script_variables: - Optional. Mapping of query variable names to values - (equivalent to the Pig command: ``name=[value]``). - properties: - Optional. A mapping of property names to values, used to - configure Pig. Properties that conflict with values set by the - Dataproc API may be overwritten. Can include properties set in - ``/etc/hadoop/conf/*-site``.xml, /etc/pig/conf/pig.properties, and - classes in user code. - jar_file_uris: - Optional. HCFS URIs of jar files to add to the CLASSPATH of - the Pig Client and Hadoop MapReduce (MR) tasks. Can contain - Pig UDFs. - logging_config: - Optional. The runtime log config for job execution. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.PigJob) - }, -) -_sym_db.RegisterMessage(PigJob) -_sym_db.RegisterMessage(PigJob.ScriptVariablesEntry) -_sym_db.RegisterMessage(PigJob.PropertiesEntry) - -SparkRJob = _reflection.GeneratedProtocolMessageType( - "SparkRJob", - (_message.Message,), - { - "PropertiesEntry": _reflection.GeneratedProtocolMessageType( - "PropertiesEntry", - (_message.Message,), - { - "DESCRIPTOR": _SPARKRJOB_PROPERTIESENTRY, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.SparkRJob.PropertiesEntry) - }, - ), - "DESCRIPTOR": _SPARKRJOB, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2", - "__doc__": """A Dataproc job for running `Apache SparkR - `__ applications on - YARN. - - Attributes: - main_r_file_uri: - Required. The HCFS URI of the main R file to use as the - driver. Must be a .R file. - args: - Optional. The arguments to pass to the driver. Do not include - arguments, such as ``--conf``, that can be set as job - properties, since a collision may occur that causes an - incorrect job submission. - file_uris: - Optional. HCFS URIs of files to be placed in the working - directory of each executor. Useful for naively parallel tasks. - archive_uris: - Optional. HCFS URIs of archives to be extracted into the - working directory of each executor. Supported file types: - .jar, .tar, .tar.gz, .tgz, and .zip. - properties: - Optional. A mapping of property names to values, used to - configure SparkR. Properties that conflict with values set by - the Dataproc API may be overwritten. Can include properties - set in /etc/spark/conf/spark-defaults.conf and classes in user - code. - logging_config: - Optional. The runtime log config for job execution. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.SparkRJob) - }, -) -_sym_db.RegisterMessage(SparkRJob) -_sym_db.RegisterMessage(SparkRJob.PropertiesEntry) - -PrestoJob = _reflection.GeneratedProtocolMessageType( - "PrestoJob", - (_message.Message,), - { - "PropertiesEntry": _reflection.GeneratedProtocolMessageType( - "PropertiesEntry", - (_message.Message,), - { - "DESCRIPTOR": _PRESTOJOB_PROPERTIESENTRY, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.PrestoJob.PropertiesEntry) - }, - ), - "DESCRIPTOR": _PRESTOJOB, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2", - "__doc__": """A Dataproc job for running `Presto `__ queries. - **IMPORTANT**: The `Dataproc Presto Optional Component - `__ - must be enabled when the cluster is created to submit a Presto job to - the cluster. - - Attributes: - queries: - Required. 
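The SparkRJob description above maps onto a similarly small message; a sketch assuming a placeholder bucket:

```python
from google.cloud import dataproc_v1

spark_r_job = dataproc_v1.Job(
    placement=dataproc_v1.JobPlacement(cluster_name="example-cluster"),
    spark_r_job=dataproc_v1.SparkRJob(
        main_r_file_uri="gs://example-bucket/analysis.R",  # must be a .R file
        args=["--input", "gs://example-bucket/data.csv"],  # avoid flags like --conf here
        properties={"spark.executor.memory": "4g"},
    ),
)
```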
The sequence of Presto queries to execute, specified - as either an HCFS file URI or as a list of queries. - query_file_uri: - The HCFS URI of the script that contains SQL queries. - query_list: - A list of queries. - continue_on_failure: - Optional. Whether to continue executing queries if a query - fails. The default value is ``false``. Setting to ``true`` can - be useful when executing independent parallel queries. - output_format: - Optional. The format in which query output will be displayed. - See the Presto documentation for supported output formats - client_tags: - Optional. Presto client tags to attach to this query - properties: - Optional. A mapping of property names to values. Used to set - Presto `session properties - `__ - Equivalent to using the –session flag in the Presto CLI - logging_config: - Optional. The runtime log config for job execution. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.PrestoJob) - }, -) -_sym_db.RegisterMessage(PrestoJob) -_sym_db.RegisterMessage(PrestoJob.PropertiesEntry) - -JobPlacement = _reflection.GeneratedProtocolMessageType( - "JobPlacement", - (_message.Message,), - { - "DESCRIPTOR": _JOBPLACEMENT, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2", - "__doc__": """Dataproc job config. - - Attributes: - cluster_name: - Required. The name of the cluster where the job will be - submitted. - cluster_uuid: - Output only. A cluster UUID generated by the Dataproc service - when the job is submitted. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.JobPlacement) - }, -) -_sym_db.RegisterMessage(JobPlacement) - -JobStatus = _reflection.GeneratedProtocolMessageType( - "JobStatus", - (_message.Message,), - { - "DESCRIPTOR": _JOBSTATUS, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2", - "__doc__": """Dataproc job status. - - Attributes: - state: - Output only. A state message specifying the overall job state. - details: - Optional. Output only. Job state details, such as an error - description if the state is ERROR. - state_start_time: - Output only. The time when this state was entered. - substate: - Output only. Additional state information, which includes - status reported by the agent. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.JobStatus) - }, -) -_sym_db.RegisterMessage(JobStatus) - -JobReference = _reflection.GeneratedProtocolMessageType( - "JobReference", - (_message.Message,), - { - "DESCRIPTOR": _JOBREFERENCE, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2", - "__doc__": """Encapsulates the full scoping used to reference a job. - - Attributes: - project_id: - Optional. The ID of the Google Cloud Platform project that the - job belongs to. If specified, must match the request project - ID. - job_id: - Optional. The job ID, which must be unique within the project. - The ID must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), or hyphens (-). The maximum length is 100 - characters. If not specified by the caller, the job ID will - be provided by the server. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.JobReference) - }, -) -_sym_db.RegisterMessage(JobReference) - -YarnApplication = _reflection.GeneratedProtocolMessageType( - "YarnApplication", - (_message.Message,), - { - "DESCRIPTOR": _YARNAPPLICATION, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2", - "__doc__": """A YARN application created by a job. 
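A PrestoJob follows the same pattern, with the caveat from the docstring above that the target cluster must have been created with the Presto optional component. A sketch, with placeholder names and an assumed output format:

```python
from google.cloud import dataproc_v1

# The target cluster must have the Presto optional component enabled.
presto_job = dataproc_v1.Job(
    placement=dataproc_v1.JobPlacement(cluster_name="example-presto-cluster"),
    presto_job=dataproc_v1.PrestoJob(
        query_list=dataproc_v1.QueryList(
            queries=["SELECT COUNT(*) FROM system.runtime.nodes"]
        ),
        output_format="CSV",        # see the Presto docs for supported output formats
        client_tags=["reporting"],  # Presto client tags attached to the query
    ),
)
```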
Application information is a - subset of - org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. - **Beta Feature**: This report is available for testing purposes only. - It may be changed before final release. - - Attributes: - name: - Required. The application name. - state: - Required. The application state. - progress: - Required. The numerical progress of the application, from 1 to - 100. - tracking_url: - Optional. The HTTP URL of the ApplicationMaster, - HistoryServer, or TimelineServer that provides application- - specific information. The URL uses the internal hostname, and - requires a proxy server for resolution and, possibly, access. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.YarnApplication) - }, -) -_sym_db.RegisterMessage(YarnApplication) - -Job = _reflection.GeneratedProtocolMessageType( - "Job", - (_message.Message,), - { - "LabelsEntry": _reflection.GeneratedProtocolMessageType( - "LabelsEntry", - (_message.Message,), - { - "DESCRIPTOR": _JOB_LABELSENTRY, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.Job.LabelsEntry) - }, - ), - "DESCRIPTOR": _JOB, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2", - "__doc__": """A Dataproc job resource. - - Attributes: - reference: - Optional. The fully qualified reference to the job, which can - be used to obtain the equivalent REST path of the job - resource. If this property is not specified when a job is - created, the server generates a job_id. - placement: - Required. Job information, including how, when, and where to - run the job. - type_job: - Required. The application/framework-specific portion of the - job. - hadoop_job: - Optional. Job is a Hadoop job. - spark_job: - Optional. Job is a Spark job. - pyspark_job: - Optional. Job is a PySpark job. - hive_job: - Optional. Job is a Hive job. - pig_job: - Optional. Job is a Pig job. - spark_r_job: - Optional. Job is a SparkR job. - spark_sql_job: - Optional. Job is a SparkSql job. - presto_job: - Optional. Job is a Presto job. - status: - Output only. The job status. Additional application-specific - status information may be contained in the type_job and - yarn_applications fields. - status_history: - Output only. The previous job status. - yarn_applications: - Output only. The collection of YARN applications spun up by - this job. **Beta** Feature: This report is available for - testing purposes only. It may be changed before final release. - driver_output_resource_uri: - Output only. A URI pointing to the location of the stdout of - the job’s driver program. - driver_control_files_uri: - Output only. If present, the location of miscellaneous control - files which may be used as part of job setup and handling. If - not present, control files may be placed in the same location - as ``driver_output_uri``. - labels: - Optional. The labels to associate with this job. Label - **keys** must contain 1 to 63 characters, and must conform to - `RFC 1035 `__. Label - **values** may be empty, but, if present, must contain 1 to 63 - characters, and must conform to `RFC 1035 - `__. No more than 32 - labels can be associated with a job. - scheduling: - Optional. Job scheduling configuration. - job_uuid: - Output only. A UUID that uniquely identifies a job within the - project over time. This is in contrast to a user-settable - reference.job_id that may be reused over time. - done: - Output only. Indicates whether the job is completed. 
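The Job resource documented above is what the JobController RPCs return. A sketch of fetching one with the microgen client and reading its output-only fields (the project, region, job id, and regional endpoint are placeholders):

```python
from google.cloud import dataproc_v1

client = dataproc_v1.JobControllerClient(
    client_options={"api_endpoint": "us-central1-dataproc.googleapis.com:443"}
)
job = client.get_job(
    project_id="example-project", region="us-central1", job_id="example-job-id"
)
print(job.status.state, job.driver_output_resource_uri)
for app in job.yarn_applications:  # beta: may change before final release
    print(app.name, app.state, app.progress)
```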
If the - value is ``false``, the job is still in progress. If ``true``, - the job is completed, and ``status.state`` field will indicate - if it was successful, failed, or cancelled. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.Job) - }, -) -_sym_db.RegisterMessage(Job) -_sym_db.RegisterMessage(Job.LabelsEntry) - -JobScheduling = _reflection.GeneratedProtocolMessageType( - "JobScheduling", - (_message.Message,), - { - "DESCRIPTOR": _JOBSCHEDULING, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2", - "__doc__": """Job scheduling options. - - Attributes: - max_failures_per_hour: - Optional. Maximum number of times per hour a driver may be - restarted as a result of driver terminating with non-zero code - before job is reported failed. A job may be reported as - thrashing if driver exits with non-zero code 4 times within 10 - minute window. Maximum value is 10. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.JobScheduling) - }, -) -_sym_db.RegisterMessage(JobScheduling) - -SubmitJobRequest = _reflection.GeneratedProtocolMessageType( - "SubmitJobRequest", - (_message.Message,), - { - "DESCRIPTOR": _SUBMITJOBREQUEST, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2", - "__doc__": """A request to submit a job. - - Attributes: - project_id: - Required. The ID of the Google Cloud Platform project that the - job belongs to. - region: - Required. The Dataproc region in which to handle the request. - job: - Required. The job resource. - request_id: - Optional. A unique id used to identify the request. If the - server receives two - [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest] - requests with the same id, then the second request will be - ignored and the first [Job][google.cloud.dataproc.v1.Job] - created and stored in the backend is returned. It is - recommended to always set this value to a `UUID `__. The id - must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). The maximum length is 40 - characters. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.SubmitJobRequest) - }, -) -_sym_db.RegisterMessage(SubmitJobRequest) - -JobMetadata = _reflection.GeneratedProtocolMessageType( - "JobMetadata", - (_message.Message,), - { - "DESCRIPTOR": _JOBMETADATA, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2", - "__doc__": """Job Operation metadata. - - Attributes: - job_id: - Output only. The job id. - status: - Output only. Most recent job status. - operation_type: - Output only. Operation type. - start_time: - Output only. Job submission time. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.JobMetadata) - }, -) -_sym_db.RegisterMessage(JobMetadata) - -GetJobRequest = _reflection.GeneratedProtocolMessageType( - "GetJobRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETJOBREQUEST, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2", - "__doc__": """A request to get the resource representation for a job in a project. - - Attributes: - project_id: - Required. The ID of the Google Cloud Platform project that the - job belongs to. - region: - Required. The Dataproc region in which to handle the request. - job_id: - Required. The job ID. 
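The SubmitJobRequest documented above supports an idempotency token via `request_id`; a sketch of a submission that can be retried safely (all names are placeholders):

```python
import uuid

from google.cloud import dataproc_v1

client = dataproc_v1.JobControllerClient(
    client_options={"api_endpoint": "us-central1-dataproc.googleapis.com:443"}
)
request = dataproc_v1.SubmitJobRequest(
    project_id="example-project",
    region="us-central1",
    job=dataproc_v1.Job(
        placement=dataproc_v1.JobPlacement(cluster_name="example-cluster"),
        hive_job=dataproc_v1.HiveJob(
            query_list=dataproc_v1.QueryList(queries=["SHOW TABLES"])
        ),
    ),
    # A retried request with the same id returns the Job created by the first call.
    request_id=str(uuid.uuid4()),
)
job = client.submit_job(request=request)
```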
- """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.GetJobRequest) - }, -) -_sym_db.RegisterMessage(GetJobRequest) - -ListJobsRequest = _reflection.GeneratedProtocolMessageType( - "ListJobsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTJOBSREQUEST, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2", - "__doc__": """A request to list jobs in a project. - - Attributes: - project_id: - Required. The ID of the Google Cloud Platform project that the - job belongs to. - region: - Required. The Dataproc region in which to handle the request. - page_size: - Optional. The number of results to return in each response. - page_token: - Optional. The page token, returned by a previous call, to - request the next page of results. - cluster_name: - Optional. If set, the returned jobs list includes only jobs - that were submitted to the named cluster. - job_state_matcher: - Optional. Specifies enumerated categories of jobs to list. - (default = match ALL jobs). If ``filter`` is provided, - ``jobStateMatcher`` will be ignored. - filter: - Optional. A filter constraining the jobs to list. Filters are - case-sensitive and have the following syntax: [field = value] - AND [field [= value]] … where **field** is ``status.state`` - or ``labels.[KEY]``, and ``[KEY]`` is a label key. **value** - can be ``*`` to match all values. ``status.state`` can be - either ``ACTIVE`` or ``NON_ACTIVE``. Only the logical ``AND`` - operator is supported; space-separated items are treated as - having an implicit ``AND`` operator. Example filter: - status.state = ACTIVE AND labels.env = staging AND - labels.starred = \* - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ListJobsRequest) - }, -) -_sym_db.RegisterMessage(ListJobsRequest) - -UpdateJobRequest = _reflection.GeneratedProtocolMessageType( - "UpdateJobRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEJOBREQUEST, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2", - "__doc__": """A request to update a job. - - Attributes: - project_id: - Required. The ID of the Google Cloud Platform project that the - job belongs to. - region: - Required. The Dataproc region in which to handle the request. - job_id: - Required. The job ID. - job: - Required. The changes to the job. - update_mask: - Required. Specifies the path, relative to Job, of the field to - update. For example, to update the labels of a Job the - update_mask parameter would be specified as labels, and the - ``PATCH`` request body would specify the new value. Note: - Currently, labels is the only field that can be updated. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.UpdateJobRequest) - }, -) -_sym_db.RegisterMessage(UpdateJobRequest) - -ListJobsResponse = _reflection.GeneratedProtocolMessageType( - "ListJobsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTJOBSRESPONSE, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2", - "__doc__": """A list of jobs in a project. - - Attributes: - jobs: - Output only. Jobs list. - next_page_token: - Optional. This token is included in the response if there are - more results to fetch. To fetch additional results, provide - this value as the ``page_token`` in a subsequent - ListJobsRequest. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ListJobsResponse) - }, -) -_sym_db.RegisterMessage(ListJobsResponse) - -CancelJobRequest = _reflection.GeneratedProtocolMessageType( - "CancelJobRequest", - (_message.Message,), - { - "DESCRIPTOR": _CANCELJOBREQUEST, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2", - "__doc__": """A request to cancel a job. - - Attributes: - project_id: - Required. The ID of the Google Cloud Platform project that the - job belongs to. - region: - Required. The Dataproc region in which to handle the request. - job_id: - Required. The job ID. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.CancelJobRequest) - }, -) -_sym_db.RegisterMessage(CancelJobRequest) - -DeleteJobRequest = _reflection.GeneratedProtocolMessageType( - "DeleteJobRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETEJOBREQUEST, - "__module__": "google.cloud.dataproc_v1.proto.jobs_pb2", - "__doc__": """A request to delete a job. - - Attributes: - project_id: - Required. The ID of the Google Cloud Platform project that the - job belongs to. - region: - Required. The Dataproc region in which to handle the request. - job_id: - Required. The job ID. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.DeleteJobRequest) - }, -) -_sym_db.RegisterMessage(DeleteJobRequest) - - -DESCRIPTOR._options = None -_LOGGINGCONFIG_DRIVERLOGLEVELSENTRY._options = None -_HADOOPJOB_PROPERTIESENTRY._options = None -_HADOOPJOB.fields_by_name["args"]._options = None -_HADOOPJOB.fields_by_name["jar_file_uris"]._options = None -_HADOOPJOB.fields_by_name["file_uris"]._options = None -_HADOOPJOB.fields_by_name["archive_uris"]._options = None -_HADOOPJOB.fields_by_name["properties"]._options = None -_HADOOPJOB.fields_by_name["logging_config"]._options = None -_SPARKJOB_PROPERTIESENTRY._options = None -_SPARKJOB.fields_by_name["args"]._options = None -_SPARKJOB.fields_by_name["jar_file_uris"]._options = None -_SPARKJOB.fields_by_name["file_uris"]._options = None -_SPARKJOB.fields_by_name["archive_uris"]._options = None -_SPARKJOB.fields_by_name["properties"]._options = None -_SPARKJOB.fields_by_name["logging_config"]._options = None -_PYSPARKJOB_PROPERTIESENTRY._options = None -_PYSPARKJOB.fields_by_name["main_python_file_uri"]._options = None -_PYSPARKJOB.fields_by_name["args"]._options = None -_PYSPARKJOB.fields_by_name["python_file_uris"]._options = None -_PYSPARKJOB.fields_by_name["jar_file_uris"]._options = None -_PYSPARKJOB.fields_by_name["file_uris"]._options = None -_PYSPARKJOB.fields_by_name["archive_uris"]._options = None -_PYSPARKJOB.fields_by_name["properties"]._options = None -_PYSPARKJOB.fields_by_name["logging_config"]._options = None -_QUERYLIST.fields_by_name["queries"]._options = None -_HIVEJOB_SCRIPTVARIABLESENTRY._options = None -_HIVEJOB_PROPERTIESENTRY._options = None -_HIVEJOB.fields_by_name["continue_on_failure"]._options = None -_HIVEJOB.fields_by_name["script_variables"]._options = None -_HIVEJOB.fields_by_name["properties"]._options = None -_HIVEJOB.fields_by_name["jar_file_uris"]._options = None -_SPARKSQLJOB_SCRIPTVARIABLESENTRY._options = None -_SPARKSQLJOB_PROPERTIESENTRY._options = None -_SPARKSQLJOB.fields_by_name["script_variables"]._options = None -_SPARKSQLJOB.fields_by_name["properties"]._options = None -_SPARKSQLJOB.fields_by_name["jar_file_uris"]._options = None -_SPARKSQLJOB.fields_by_name["logging_config"]._options = None -_PIGJOB_SCRIPTVARIABLESENTRY._options = None 
-_PIGJOB_PROPERTIESENTRY._options = None -_PIGJOB.fields_by_name["continue_on_failure"]._options = None -_PIGJOB.fields_by_name["script_variables"]._options = None -_PIGJOB.fields_by_name["properties"]._options = None -_PIGJOB.fields_by_name["jar_file_uris"]._options = None -_PIGJOB.fields_by_name["logging_config"]._options = None -_SPARKRJOB_PROPERTIESENTRY._options = None -_SPARKRJOB.fields_by_name["main_r_file_uri"]._options = None -_SPARKRJOB.fields_by_name["args"]._options = None -_SPARKRJOB.fields_by_name["file_uris"]._options = None -_SPARKRJOB.fields_by_name["archive_uris"]._options = None -_SPARKRJOB.fields_by_name["properties"]._options = None -_SPARKRJOB.fields_by_name["logging_config"]._options = None -_PRESTOJOB_PROPERTIESENTRY._options = None -_PRESTOJOB.fields_by_name["continue_on_failure"]._options = None -_PRESTOJOB.fields_by_name["output_format"]._options = None -_PRESTOJOB.fields_by_name["client_tags"]._options = None -_PRESTOJOB.fields_by_name["properties"]._options = None -_PRESTOJOB.fields_by_name["logging_config"]._options = None -_JOBPLACEMENT.fields_by_name["cluster_name"]._options = None -_JOBPLACEMENT.fields_by_name["cluster_uuid"]._options = None -_JOBSTATUS.fields_by_name["state"]._options = None -_JOBSTATUS.fields_by_name["details"]._options = None -_JOBSTATUS.fields_by_name["state_start_time"]._options = None -_JOBSTATUS.fields_by_name["substate"]._options = None -_JOBREFERENCE.fields_by_name["project_id"]._options = None -_JOBREFERENCE.fields_by_name["job_id"]._options = None -_YARNAPPLICATION.fields_by_name["name"]._options = None -_YARNAPPLICATION.fields_by_name["state"]._options = None -_YARNAPPLICATION.fields_by_name["progress"]._options = None -_YARNAPPLICATION.fields_by_name["tracking_url"]._options = None -_JOB_LABELSENTRY._options = None -_JOB.fields_by_name["reference"]._options = None -_JOB.fields_by_name["placement"]._options = None -_JOB.fields_by_name["hadoop_job"]._options = None -_JOB.fields_by_name["spark_job"]._options = None -_JOB.fields_by_name["pyspark_job"]._options = None -_JOB.fields_by_name["hive_job"]._options = None -_JOB.fields_by_name["pig_job"]._options = None -_JOB.fields_by_name["spark_r_job"]._options = None -_JOB.fields_by_name["spark_sql_job"]._options = None -_JOB.fields_by_name["presto_job"]._options = None -_JOB.fields_by_name["status"]._options = None -_JOB.fields_by_name["status_history"]._options = None -_JOB.fields_by_name["yarn_applications"]._options = None -_JOB.fields_by_name["driver_output_resource_uri"]._options = None -_JOB.fields_by_name["driver_control_files_uri"]._options = None -_JOB.fields_by_name["labels"]._options = None -_JOB.fields_by_name["scheduling"]._options = None -_JOB.fields_by_name["job_uuid"]._options = None -_JOB.fields_by_name["done"]._options = None -_JOBSCHEDULING.fields_by_name["max_failures_per_hour"]._options = None -_SUBMITJOBREQUEST.fields_by_name["project_id"]._options = None -_SUBMITJOBREQUEST.fields_by_name["region"]._options = None -_SUBMITJOBREQUEST.fields_by_name["job"]._options = None -_SUBMITJOBREQUEST.fields_by_name["request_id"]._options = None -_JOBMETADATA.fields_by_name["job_id"]._options = None -_JOBMETADATA.fields_by_name["status"]._options = None -_JOBMETADATA.fields_by_name["operation_type"]._options = None -_JOBMETADATA.fields_by_name["start_time"]._options = None -_GETJOBREQUEST.fields_by_name["project_id"]._options = None -_GETJOBREQUEST.fields_by_name["region"]._options = None -_GETJOBREQUEST.fields_by_name["job_id"]._options = None 
-_LISTJOBSREQUEST.fields_by_name["project_id"]._options = None -_LISTJOBSREQUEST.fields_by_name["region"]._options = None -_LISTJOBSREQUEST.fields_by_name["page_size"]._options = None -_LISTJOBSREQUEST.fields_by_name["page_token"]._options = None -_LISTJOBSREQUEST.fields_by_name["cluster_name"]._options = None -_LISTJOBSREQUEST.fields_by_name["job_state_matcher"]._options = None -_LISTJOBSREQUEST.fields_by_name["filter"]._options = None -_UPDATEJOBREQUEST.fields_by_name["project_id"]._options = None -_UPDATEJOBREQUEST.fields_by_name["region"]._options = None -_UPDATEJOBREQUEST.fields_by_name["job_id"]._options = None -_UPDATEJOBREQUEST.fields_by_name["job"]._options = None -_UPDATEJOBREQUEST.fields_by_name["update_mask"]._options = None -_LISTJOBSRESPONSE.fields_by_name["jobs"]._options = None -_LISTJOBSRESPONSE.fields_by_name["next_page_token"]._options = None -_CANCELJOBREQUEST.fields_by_name["project_id"]._options = None -_CANCELJOBREQUEST.fields_by_name["region"]._options = None -_CANCELJOBREQUEST.fields_by_name["job_id"]._options = None -_DELETEJOBREQUEST.fields_by_name["project_id"]._options = None -_DELETEJOBREQUEST.fields_by_name["region"]._options = None -_DELETEJOBREQUEST.fields_by_name["job_id"]._options = None - -_JOBCONTROLLER = _descriptor.ServiceDescriptor( - name="JobController", - full_name="google.cloud.dataproc.v1.JobController", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\027dataproc.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", - create_key=_descriptor._internal_create_key, - serialized_start=7206, - serialized_end=8610, - methods=[ - _descriptor.MethodDescriptor( - name="SubmitJob", - full_name="google.cloud.dataproc.v1.JobController.SubmitJob", - index=0, - containing_service=None, - input_type=_SUBMITJOBREQUEST, - output_type=_JOB, - serialized_options=b'\202\323\344\223\002;"6/v1/projects/{project_id}/regions/{region}/jobs:submit:\001*\332A\025project_id,region,job', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="SubmitJobAsOperation", - full_name="google.cloud.dataproc.v1.JobController.SubmitJobAsOperation", - index=1, - containing_service=None, - input_type=_SUBMITJOBREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\002F"A/v1/projects/{project_id}/regions/{region}/jobs:submitAsOperation:\001*\332A\027project_id, region, job\312A\022\n\003Job\022\013JobMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetJob", - full_name="google.cloud.dataproc.v1.JobController.GetJob", - index=2, - containing_service=None, - input_type=_GETJOBREQUEST, - output_type=_JOB, - serialized_options=b"\202\323\344\223\002:\0228/v1/projects/{project_id}/regions/{region}/jobs/{job_id}\332A\030project_id,region,job_id", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ListJobs", - full_name="google.cloud.dataproc.v1.JobController.ListJobs", - index=3, - containing_service=None, - input_type=_LISTJOBSREQUEST, - output_type=_LISTJOBSRESPONSE, - serialized_options=b"\202\323\344\223\0021\022//v1/projects/{project_id}/regions/{region}/jobs\332A\021project_id,region\332A\030project_id,region,filter", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="UpdateJob", - full_name="google.cloud.dataproc.v1.JobController.UpdateJob", - index=4, - containing_service=None, - input_type=_UPDATEJOBREQUEST, - output_type=_JOB, 
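The service descriptor above wires SubmitJobAsOperation to a long-running operation whose response is a Job and whose metadata is JobMetadata; on the microgen client this should surface as `submit_job_as_operation`. A sketch with placeholder names:

```python
from google.cloud import dataproc_v1

client = dataproc_v1.JobControllerClient(
    client_options={"api_endpoint": "us-central1-dataproc.googleapis.com:443"}
)
job = dataproc_v1.Job(
    placement=dataproc_v1.JobPlacement(cluster_name="example-cluster"),
    pig_job=dataproc_v1.PigJob(
        query_list=dataproc_v1.QueryList(queries=["fs -ls /"])
    ),
)
operation = client.submit_job_as_operation(
    project_id="example-project", region="us-central1", job=job
)
finished_job = operation.result()  # blocks until the Job reaches a terminal state
```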
- serialized_options=b"\202\323\344\223\002?28/v1/projects/{project_id}/regions/{region}/jobs/{job_id}:\003job", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="CancelJob", - full_name="google.cloud.dataproc.v1.JobController.CancelJob", - index=5, - containing_service=None, - input_type=_CANCELJOBREQUEST, - output_type=_JOB, - serialized_options=b'\202\323\344\223\002D"?/v1/projects/{project_id}/regions/{region}/jobs/{job_id}:cancel:\001*\332A\030project_id,region,job_id', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeleteJob", - full_name="google.cloud.dataproc.v1.JobController.DeleteJob", - index=6, - containing_service=None, - input_type=_DELETEJOBREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\002:*8/v1/projects/{project_id}/regions/{region}/jobs/{job_id}\332A\030project_id,region,job_id", - create_key=_descriptor._internal_create_key, - ), - ], -) -_sym_db.RegisterServiceDescriptor(_JOBCONTROLLER) - -DESCRIPTOR.services_by_name["JobController"] = _JOBCONTROLLER - -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/dataproc_v1/proto/jobs_pb2_grpc.py b/google/cloud/dataproc_v1/proto/jobs_pb2_grpc.py deleted file mode 100644 index 106082cb..00000000 --- a/google/cloud/dataproc_v1/proto/jobs_pb2_grpc.py +++ /dev/null @@ -1,356 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -from google.cloud.dataproc_v1.proto import ( - jobs_pb2 as google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2, -) -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -class JobControllerStub(object): - """The JobController provides methods to manage jobs. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.SubmitJob = channel.unary_unary( - "/google.cloud.dataproc.v1.JobController/SubmitJob", - request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.SubmitJobRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.Job.FromString, - ) - self.SubmitJobAsOperation = channel.unary_unary( - "/google.cloud.dataproc.v1.JobController/SubmitJobAsOperation", - request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.SubmitJobRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetJob = channel.unary_unary( - "/google.cloud.dataproc.v1.JobController/GetJob", - request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.GetJobRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.Job.FromString, - ) - self.ListJobs = channel.unary_unary( - "/google.cloud.dataproc.v1.JobController/ListJobs", - request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.ListJobsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.ListJobsResponse.FromString, - ) - self.UpdateJob = channel.unary_unary( - "/google.cloud.dataproc.v1.JobController/UpdateJob", - request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.UpdateJobRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.Job.FromString, - ) - self.CancelJob = channel.unary_unary( - "/google.cloud.dataproc.v1.JobController/CancelJob", - request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.CancelJobRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.Job.FromString, - ) - self.DeleteJob = channel.unary_unary( - "/google.cloud.dataproc.v1.JobController/DeleteJob", - request_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.DeleteJobRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - - -class JobControllerServicer(object): - """The JobController provides methods to manage jobs. - """ - - def SubmitJob(self, request, context): - """Submits a job to a cluster. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def SubmitJobAsOperation(self, request, context): - """Submits job to a cluster. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetJob(self, request, context): - """Gets the resource representation for a job in a project. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListJobs(self, request, context): - """Lists regions/{region}/jobs in a project. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateJob(self, request, context): - """Updates a job in a project. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CancelJob(self, request, context): - """Starts a job cancellation request. To access the job resource - after cancellation, call - [regions/{region}/jobs.list](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) - or - [regions/{region}/jobs.get](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/get). - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteJob(self, request, context): - """Deletes the job from the project. If the job is active, the delete fails, - and the response returns `FAILED_PRECONDITION`. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_JobControllerServicer_to_server(servicer, server): - rpc_method_handlers = { - "SubmitJob": grpc.unary_unary_rpc_method_handler( - servicer.SubmitJob, - request_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.SubmitJobRequest.FromString, - response_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.Job.SerializeToString, - ), - "SubmitJobAsOperation": grpc.unary_unary_rpc_method_handler( - servicer.SubmitJobAsOperation, - request_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.SubmitJobRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetJob": grpc.unary_unary_rpc_method_handler( - servicer.GetJob, - request_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.GetJobRequest.FromString, - response_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.Job.SerializeToString, - ), - "ListJobs": grpc.unary_unary_rpc_method_handler( - servicer.ListJobs, - request_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.ListJobsRequest.FromString, - response_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.ListJobsResponse.SerializeToString, - ), - "UpdateJob": grpc.unary_unary_rpc_method_handler( - servicer.UpdateJob, - request_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.UpdateJobRequest.FromString, - response_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.Job.SerializeToString, - ), - "CancelJob": grpc.unary_unary_rpc_method_handler( - servicer.CancelJob, - request_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.CancelJobRequest.FromString, - response_serializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.Job.SerializeToString, - ), - "DeleteJob": grpc.unary_unary_rpc_method_handler( - servicer.DeleteJob, - request_deserializer=google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.DeleteJobRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.cloud.dataproc.v1.JobController", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) - - -# This class is part of an EXPERIMENTAL API. -class JobController(object): - """The JobController provides methods to manage jobs. 
- """ - - @staticmethod - def SubmitJob( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1.JobController/SubmitJob", - google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.SubmitJobRequest.SerializeToString, - google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.Job.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def SubmitJobAsOperation( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1.JobController/SubmitJobAsOperation", - google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.SubmitJobRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetJob( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1.JobController/GetJob", - google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.GetJobRequest.SerializeToString, - google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.Job.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ListJobs( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1.JobController/ListJobs", - google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.ListJobsRequest.SerializeToString, - google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.ListJobsResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def UpdateJob( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1.JobController/UpdateJob", - google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.UpdateJobRequest.SerializeToString, - google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.Job.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def CancelJob( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1.JobController/CancelJob", - google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.CancelJobRequest.SerializeToString, - google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.Job.FromString, - options, - 
channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DeleteJob( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1.JobController/DeleteJob", - google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2.DeleteJobRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) diff --git a/google/cloud/dataproc_v1/proto/operations_pb2.py b/google/cloud/dataproc_v1/proto/operations_pb2.py deleted file mode 100644 index f8ed3ca8..00000000 --- a/google/cloud/dataproc_v1/proto/operations_pb2.py +++ /dev/null @@ -1,515 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/dataproc_v1/proto/operations.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/dataproc_v1/proto/operations.proto", - package="google.cloud.dataproc.v1", - syntax="proto3", - serialized_options=b"\n\034com.google.cloud.dataproc.v1B\017OperationsProtoP\001Z@google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n/google/cloud/dataproc_v1/proto/operations.proto\x12\x18google.cloud.dataproc.v1\x1a\x1fgoogle/api/field_behavior.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\x89\x02\n\x16\x43lusterOperationStatus\x12J\n\x05state\x18\x01 \x01(\x0e\x32\x36.google.cloud.dataproc.v1.ClusterOperationStatus.StateB\x03\xe0\x41\x03\x12\x18\n\x0binner_state\x18\x02 \x01(\tB\x03\xe0\x41\x03\x12\x14\n\x07\x64\x65tails\x18\x03 \x01(\tB\x03\xe0\x41\x03\x12\x39\n\x10state_start_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03"8\n\x05State\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0b\n\x07PENDING\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\x08\n\x04\x44ONE\x10\x03"\xb8\x03\n\x18\x43lusterOperationMetadata\x12\x19\n\x0c\x63luster_name\x18\x07 \x01(\tB\x03\xe0\x41\x03\x12\x19\n\x0c\x63luster_uuid\x18\x08 \x01(\tB\x03\xe0\x41\x03\x12\x45\n\x06status\x18\t \x01(\x0b\x32\x30.google.cloud.dataproc.v1.ClusterOperationStatusB\x03\xe0\x41\x03\x12M\n\x0estatus_history\x18\n \x03(\x0b\x32\x30.google.cloud.dataproc.v1.ClusterOperationStatusB\x03\xe0\x41\x03\x12\x1b\n\x0eoperation_type\x18\x0b \x01(\tB\x03\xe0\x41\x03\x12\x18\n\x0b\x64\x65scription\x18\x0c \x01(\tB\x03\xe0\x41\x03\x12S\n\x06labels\x18\r \x03(\x0b\x32>.google.cloud.dataproc.v1.ClusterOperationMetadata.LabelsEntryB\x03\xe0\x41\x03\x12\x15\n\x08warnings\x18\x0e \x03(\tB\x03\xe0\x41\x03\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01\x42s\n\x1c\x63om.google.cloud.dataproc.v1B\x0fOperationsProtoP\x01Z@google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataprocb\x06proto3', - dependencies=[ - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_CLUSTEROPERATIONSTATUS_STATE = _descriptor.EnumDescriptor( - name="State", - full_name="google.cloud.dataproc.v1.ClusterOperationStatus.State", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="UNKNOWN", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="PENDING", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="RUNNING", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="DONE", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=383, - serialized_end=439, -) -_sym_db.RegisterEnumDescriptor(_CLUSTEROPERATIONSTATUS_STATE) - - -_CLUSTEROPERATIONSTATUS = _descriptor.Descriptor( - name="ClusterOperationStatus", - full_name="google.cloud.dataproc.v1.ClusterOperationStatus", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="state", - full_name="google.cloud.dataproc.v1.ClusterOperationStatus.state", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="inner_state", - full_name="google.cloud.dataproc.v1.ClusterOperationStatus.inner_state", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="details", - full_name="google.cloud.dataproc.v1.ClusterOperationStatus.details", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="state_start_time", - full_name="google.cloud.dataproc.v1.ClusterOperationStatus.state_start_time", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - 
nested_types=[], - enum_types=[_CLUSTEROPERATIONSTATUS_STATE,], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=174, - serialized_end=439, -) - - -_CLUSTEROPERATIONMETADATA_LABELSENTRY = _descriptor.Descriptor( - name="LabelsEntry", - full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.LabelsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.LabelsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.LabelsEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=837, - serialized_end=882, -) - -_CLUSTEROPERATIONMETADATA = _descriptor.Descriptor( - name="ClusterOperationMetadata", - full_name="google.cloud.dataproc.v1.ClusterOperationMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="cluster_name", - full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.cluster_name", - index=0, - number=7, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_uuid", - full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.cluster_uuid", - index=1, - number=8, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="status", - full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.status", - index=2, - number=9, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="status_history", - full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.status_history", - index=3, - number=10, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, 
- enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="operation_type", - full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.operation_type", - index=4, - number=11, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="description", - full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.description", - index=5, - number=12, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="labels", - full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.labels", - index=6, - number=13, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="warnings", - full_name="google.cloud.dataproc.v1.ClusterOperationMetadata.warnings", - index=7, - number=14, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_CLUSTEROPERATIONMETADATA_LABELSENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=442, - serialized_end=882, -) - -_CLUSTEROPERATIONSTATUS.fields_by_name[ - "state" -].enum_type = _CLUSTEROPERATIONSTATUS_STATE -_CLUSTEROPERATIONSTATUS.fields_by_name[ - "state_start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CLUSTEROPERATIONSTATUS_STATE.containing_type = _CLUSTEROPERATIONSTATUS -_CLUSTEROPERATIONMETADATA_LABELSENTRY.containing_type = _CLUSTEROPERATIONMETADATA -_CLUSTEROPERATIONMETADATA.fields_by_name[ - "status" -].message_type = _CLUSTEROPERATIONSTATUS -_CLUSTEROPERATIONMETADATA.fields_by_name[ - "status_history" -].message_type = _CLUSTEROPERATIONSTATUS -_CLUSTEROPERATIONMETADATA.fields_by_name[ - "labels" -].message_type = _CLUSTEROPERATIONMETADATA_LABELSENTRY -DESCRIPTOR.message_types_by_name["ClusterOperationStatus"] = _CLUSTEROPERATIONSTATUS -DESCRIPTOR.message_types_by_name["ClusterOperationMetadata"] = _CLUSTEROPERATIONMETADATA -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -ClusterOperationStatus = _reflection.GeneratedProtocolMessageType( - "ClusterOperationStatus", - (_message.Message,), - { - "DESCRIPTOR": _CLUSTEROPERATIONSTATUS, - "__module__": "google.cloud.dataproc_v1.proto.operations_pb2", - "__doc__": """The status of the operation. - - Attributes: - state: - Output only. A message containing the operation state. 
- inner_state: - Output only. A message containing the detailed operation - state. - details: - Output only. A message containing any operation metadata - details. - state_start_time: - Output only. The time this state was entered. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ClusterOperationStatus) - }, -) -_sym_db.RegisterMessage(ClusterOperationStatus) - -ClusterOperationMetadata = _reflection.GeneratedProtocolMessageType( - "ClusterOperationMetadata", - (_message.Message,), - { - "LabelsEntry": _reflection.GeneratedProtocolMessageType( - "LabelsEntry", - (_message.Message,), - { - "DESCRIPTOR": _CLUSTEROPERATIONMETADATA_LABELSENTRY, - "__module__": "google.cloud.dataproc_v1.proto.operations_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ClusterOperationMetadata.LabelsEntry) - }, - ), - "DESCRIPTOR": _CLUSTEROPERATIONMETADATA, - "__module__": "google.cloud.dataproc_v1.proto.operations_pb2", - "__doc__": """Metadata describing the operation. - - Attributes: - cluster_name: - Output only. Name of the cluster for the operation. - cluster_uuid: - Output only. Cluster UUID for the operation. - status: - Output only. Current operation status. - status_history: - Output only. The previous operation status. - operation_type: - Output only. The operation type. - description: - Output only. Short description of operation. - labels: - Output only. Labels associated with the operation - warnings: - Output only. Errors encountered during operation execution. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ClusterOperationMetadata) - }, -) -_sym_db.RegisterMessage(ClusterOperationMetadata) -_sym_db.RegisterMessage(ClusterOperationMetadata.LabelsEntry) - - -DESCRIPTOR._options = None -_CLUSTEROPERATIONSTATUS.fields_by_name["state"]._options = None -_CLUSTEROPERATIONSTATUS.fields_by_name["inner_state"]._options = None -_CLUSTEROPERATIONSTATUS.fields_by_name["details"]._options = None -_CLUSTEROPERATIONSTATUS.fields_by_name["state_start_time"]._options = None -_CLUSTEROPERATIONMETADATA_LABELSENTRY._options = None -_CLUSTEROPERATIONMETADATA.fields_by_name["cluster_name"]._options = None -_CLUSTEROPERATIONMETADATA.fields_by_name["cluster_uuid"]._options = None -_CLUSTEROPERATIONMETADATA.fields_by_name["status"]._options = None -_CLUSTEROPERATIONMETADATA.fields_by_name["status_history"]._options = None -_CLUSTEROPERATIONMETADATA.fields_by_name["operation_type"]._options = None -_CLUSTEROPERATIONMETADATA.fields_by_name["description"]._options = None -_CLUSTEROPERATIONMETADATA.fields_by_name["labels"]._options = None -_CLUSTEROPERATIONMETADATA.fields_by_name["warnings"]._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/dataproc_v1/proto/operations_pb2_grpc.py b/google/cloud/dataproc_v1/proto/operations_pb2_grpc.py deleted file mode 100644 index 8a939394..00000000 --- a/google/cloud/dataproc_v1/proto/operations_pb2_grpc.py +++ /dev/null @@ -1,3 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc diff --git a/google/cloud/dataproc_v1/proto/shared_pb2.py b/google/cloud/dataproc_v1/proto/shared_pb2.py deleted file mode 100644 index 2b5e305d..00000000 --- a/google/cloud/dataproc_v1/proto/shared_pb2.py +++ /dev/null @@ -1,115 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
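ClusterOperationStatus and ClusterOperationMetadata, documented above, are what cluster long-running operations report as metadata. A sketch of reading them from a create-cluster operation on the microgen ClusterControllerClient (all names are placeholders and the cluster config is left empty for brevity):

```python
from google.cloud import dataproc_v1

client = dataproc_v1.ClusterControllerClient(
    client_options={"api_endpoint": "us-central1-dataproc.googleapis.com:443"}
)
operation = client.create_cluster(
    project_id="example-project",
    region="us-central1",
    cluster=dataproc_v1.Cluster(
        project_id="example-project",
        cluster_name="example-cluster",
        config=dataproc_v1.ClusterConfig(),
    ),
)
metadata = operation.metadata  # ClusterOperationMetadata
print(metadata.operation_type, metadata.status.state, list(metadata.warnings))
cluster = operation.result()   # blocks until the cluster is created
```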
-# source: google/cloud/dataproc_v1/proto/shared.proto - -from google.protobuf.internal import enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/dataproc_v1/proto/shared.proto", - package="google.cloud.dataproc.v1", - syntax="proto3", - serialized_options=b"\n\034com.google.cloud.dataproc.v1B\013SharedProtoP\001Z@google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc", - create_key=_descriptor._internal_create_key, - serialized_pb=b"\n+google/cloud/dataproc_v1/proto/shared.proto\x12\x18google.cloud.dataproc.v1\x1a\x1cgoogle/api/annotations.proto*|\n\tComponent\x12\x19\n\x15\x43OMPONENT_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x41NACONDA\x10\x05\x12\x10\n\x0cHIVE_WEBHCAT\x10\x03\x12\x0b\n\x07JUPYTER\x10\x01\x12\n\n\x06PRESTO\x10\x06\x12\x0c\n\x08ZEPPELIN\x10\x04\x12\r\n\tZOOKEEPER\x10\x08\x42o\n\x1c\x63om.google.cloud.dataproc.v1B\x0bSharedProtoP\x01Z@google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataprocb\x06proto3", - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,], -) - -_COMPONENT = _descriptor.EnumDescriptor( - name="Component", - full_name="google.cloud.dataproc.v1.Component", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="COMPONENT_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="ANACONDA", - index=1, - number=5, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="HIVE_WEBHCAT", - index=2, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="JUPYTER", - index=3, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="PRESTO", - index=4, - number=6, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="ZEPPELIN", - index=5, - number=4, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="ZOOKEEPER", - index=6, - number=8, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=103, - serialized_end=227, -) -_sym_db.RegisterEnumDescriptor(_COMPONENT) - -Component = enum_type_wrapper.EnumTypeWrapper(_COMPONENT) -COMPONENT_UNSPECIFIED = 0 -ANACONDA = 5 -HIVE_WEBHCAT = 3 -JUPYTER = 1 -PRESTO = 6 -ZEPPELIN = 4 -ZOOKEEPER = 8 - - -DESCRIPTOR.enum_types_by_name["Component"] = _COMPONENT -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/dataproc_v1/proto/shared_pb2_grpc.py b/google/cloud/dataproc_v1/proto/shared_pb2_grpc.py deleted file mode 100644 index 8a939394..00000000 --- 
a/google/cloud/dataproc_v1/proto/shared_pb2_grpc.py +++ /dev/null @@ -1,3 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc diff --git a/google/cloud/dataproc_v1/proto/workflow_templates_pb2.py b/google/cloud/dataproc_v1/proto/workflow_templates_pb2.py deleted file mode 100644 index 0ecd5828..00000000 --- a/google/cloud/dataproc_v1/proto/workflow_templates_pb2.py +++ /dev/null @@ -1,3532 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/dataproc_v1/proto/workflow_templates.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.dataproc_v1.proto import ( - clusters_pb2 as google_dot_cloud_dot_dataproc__v1_dot_proto_dot_clusters__pb2, -) -from google.cloud.dataproc_v1.proto import ( - jobs_pb2 as google_dot_cloud_dot_dataproc__v1_dot_proto_dot_jobs__pb2, -) -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/dataproc_v1/proto/workflow_templates.proto", - package="google.cloud.dataproc.v1", - syntax="proto3", - serialized_options=b"\n\034com.google.cloud.dataproc.v1B\026WorkflowTemplatesProtoP\001Z@google.golang.org/genproto/googleapis/cloud/dataproc/v1;dataproc", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n7google/cloud/dataproc_v1/proto/workflow_templates.proto\x12\x18google.cloud.dataproc.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a-google/cloud/dataproc_v1/proto/clusters.proto\x1a)google/cloud/dataproc_v1/proto/jobs.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xcd\x05\n\x10WorkflowTemplate\x12\x0f\n\x02id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x14\n\x07version\x18\x03 \x01(\x05\x42\x03\xe0\x41\x01\x12\x34\n\x0b\x63reate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x34\n\x0bupdate_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12K\n\x06labels\x18\x06 \x03(\x0b\x32\x36.google.cloud.dataproc.v1.WorkflowTemplate.LabelsEntryB\x03\xe0\x41\x01\x12K\n\tplacement\x18\x07 \x01(\x0b\x32\x33.google.cloud.dataproc.v1.WorkflowTemplatePlacementB\x03\xe0\x41\x02\x12\x37\n\x04jobs\x18\x08 \x03(\x0b\x32$.google.cloud.dataproc.v1.OrderedJobB\x03\xe0\x41\x02\x12\x44\n\nparameters\x18\t \x03(\x0b\x32+.google.cloud.dataproc.v1.TemplateParameterB\x03\xe0\x41\x01\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01:\xca\x01\xea\x41\xc6\x01\n(dataproc.googleapis.com/WorkflowTemplate\x12Iprojects/{project}/regions/{region}/workflowTemplates/{workflow_template}\x12Mprojects/{project}/locations/{location}/workflowTemplates/{workflow_template} \x01"\xb4\x01\n\x19WorkflowTemplatePlacement\x12\x43\n\x0fmanaged_cluster\x18\x01 \x01(\x0b\x32(.google.cloud.dataproc.v1.ManagedClusterH\x00\x12\x45\n\x10\x63luster_selector\x18\x02 \x01(\x0b\x32).google.cloud.dataproc.v1.ClusterSelectorH\x00\x42\x0b\n\tplacement"\xe3\x01\n\x0eManagedCluster\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12<\n\x06\x63onfig\x18\x03 \x01(\x0b\x32\'.google.cloud.dataproc.v1.ClusterConfigB\x03\xe0\x41\x02\x12I\n\x06labels\x18\x04 \x03(\x0b\x32\x34.google.cloud.dataproc.v1.ManagedCluster.LabelsEntryB\x03\xe0\x41\x01\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\xb5\x01\n\x0f\x43lusterSelector\x12\x11\n\x04zone\x18\x01 \x01(\tB\x03\xe0\x41\x01\x12Y\n\x0e\x63luster_labels\x18\x02 \x03(\x0b\x32<.google.cloud.dataproc.v1.ClusterSelector.ClusterLabelsEntryB\x03\xe0\x41\x02\x1a\x34\n\x12\x43lusterLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\x86\x06\n\nOrderedJob\x12\x14\n\x07step_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12>\n\nhadoop_job\x18\x02 \x01(\x0b\x32#.google.cloud.dataproc.v1.HadoopJobB\x03\xe0\x41\x01H\x00\x12<\n\tspark_job\x18\x03 \x01(\x0b\x32".google.cloud.dataproc.v1.SparkJobB\x03\xe0\x41\x01H\x00\x12@\n\x0bpyspark_job\x18\x04 \x01(\x0b\x32$.google.cloud.dataproc.v1.PySparkJobB\x03\xe0\x41\x01H\x00\x12:\n\x08hive_job\x18\x05 \x01(\x0b\x32!.google.cloud.dataproc.v1.HiveJobB\x03\xe0\x41\x01H\x00\x12\x38\n\x07pig_job\x18\x06 \x01(\x0b\x32 .google.cloud.dataproc.v1.PigJobB\x03\xe0\x41\x01H\x00\x12?\n\x0bspark_r_job\x18\x0b \x01(\x0b\x32#.google.cloud.dataproc.v1.SparkRJobB\x03\xe0\x41\x01H\x00\x12\x43\n\rspark_sql_job\x18\x07 \x01(\x0b\x32%.google.cloud.dataproc.v1.SparkSqlJobB\x03\xe0\x41\x01H\x00\x12>\n\npresto_job\x18\x0c \x01(\x0b\x32#.google.cloud.dataproc.v1.PrestoJobB\x03\xe0\x41\x01H\x00\x12\x45\n\x06labels\x18\x08 \x03(\x0b\x32\x30.google.cloud.dataproc.v1.OrderedJob.LabelsEntryB\x03\xe0\x41\x01\x12@\n\nscheduling\x18\t \x01(\x0b\x32\'.google.cloud.dataproc.v1.JobSchedulingB\x03\xe0\x41\x01\x12"\n\x15prerequisite_step_ids\x18\n \x03(\tB\x03\xe0\x41\x01\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\n\n\x08job_type"\x9d\x01\n\x11TemplateParameter\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06\x66ields\x18\x02 \x03(\tB\x03\xe0\x41\x02\x12\x18\n\x0b\x64\x65scription\x18\x03 \x01(\tB\x03\xe0\x41\x01\x12\x46\n\nvalidation\x18\x04 \x01(\x0b\x32-.google.cloud.dataproc.v1.ParameterValidationB\x03\xe0\x41\x01"\xa1\x01\n\x13ParameterValidation\x12:\n\x05regex\x18\x01 \x01(\x0b\x32).google.cloud.dataproc.v1.RegexValidationH\x00\x12;\n\x06values\x18\x02 \x01(\x0b\x32).google.cloud.dataproc.v1.ValueValidationH\x00\x42\x11\n\x0fvalidation_type"\'\n\x0fRegexValidation\x12\x14\n\x07regexes\x18\x01 \x03(\tB\x03\xe0\x41\x02"&\n\x0fValueValidation\x12\x13\n\x06values\x18\x01 \x03(\tB\x03\xe0\x41\x02"\xaf\x05\n\x10WorkflowMetadata\x12\x15\n\x08template\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x14\n\x07version\x18\x02 \x01(\x05\x42\x03\xe0\x41\x03\x12G\n\x0e\x63reate_cluster\x18\x03 \x01(\x0b\x32*.google.cloud.dataproc.v1.ClusterOperationB\x03\xe0\x41\x03\x12;\n\x05graph\x18\x04 
\x01(\x0b\x32\'.google.cloud.dataproc.v1.WorkflowGraphB\x03\xe0\x41\x03\x12G\n\x0e\x64\x65lete_cluster\x18\x05 \x01(\x0b\x32*.google.cloud.dataproc.v1.ClusterOperationB\x03\xe0\x41\x03\x12\x44\n\x05state\x18\x06 \x01(\x0e\x32\x30.google.cloud.dataproc.v1.WorkflowMetadata.StateB\x03\xe0\x41\x03\x12\x19\n\x0c\x63luster_name\x18\x07 \x01(\tB\x03\xe0\x41\x03\x12N\n\nparameters\x18\x08 \x03(\x0b\x32:.google.cloud.dataproc.v1.WorkflowMetadata.ParametersEntry\x12\x33\n\nstart_time\x18\t \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x31\n\x08\x65nd_time\x18\n \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x19\n\x0c\x63luster_uuid\x18\x0b \x01(\tB\x03\xe0\x41\x03\x1a\x31\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"8\n\x05State\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0b\n\x07PENDING\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\x08\n\x04\x44ONE\x10\x03"T\n\x10\x43lusterOperation\x12\x19\n\x0coperation_id\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x12\n\x05\x65rror\x18\x02 \x01(\tB\x03\xe0\x41\x03\x12\x11\n\x04\x64one\x18\x03 \x01(\x08\x42\x03\xe0\x41\x03"K\n\rWorkflowGraph\x12:\n\x05nodes\x18\x01 \x03(\x0b\x32&.google.cloud.dataproc.v1.WorkflowNodeB\x03\xe0\x41\x03"\xa3\x02\n\x0cWorkflowNode\x12\x14\n\x07step_id\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12"\n\x15prerequisite_step_ids\x18\x02 \x03(\tB\x03\xe0\x41\x03\x12\x13\n\x06job_id\x18\x03 \x01(\tB\x03\xe0\x41\x03\x12\x44\n\x05state\x18\x05 \x01(\x0e\x32\x30.google.cloud.dataproc.v1.WorkflowNode.NodeStateB\x03\xe0\x41\x03\x12\x12\n\x05\x65rror\x18\x06 \x01(\tB\x03\xe0\x41\x03"j\n\tNodeState\x12\x1a\n\x16NODE_STATE_UNSPECIFIED\x10\x00\x12\x0b\n\x07\x42LOCKED\x10\x01\x12\x0c\n\x08RUNNABLE\x10\x02\x12\x0b\n\x07RUNNING\x10\x03\x12\r\n\tCOMPLETED\x10\x04\x12\n\n\x06\x46\x41ILED\x10\x05"\xa4\x01\n\x1d\x43reateWorkflowTemplateRequest\x12@\n\x06parent\x18\x01 \x01(\tB0\xe0\x41\x02\xfa\x41*\x12(dataproc.googleapis.com/WorkflowTemplate\x12\x41\n\x08template\x18\x02 \x01(\x0b\x32*.google.cloud.dataproc.v1.WorkflowTemplateB\x03\xe0\x41\x02"r\n\x1aGetWorkflowTemplateRequest\x12>\n\x04name\x18\x01 \x01(\tB0\xe0\x41\x02\xfa\x41*\n(dataproc.googleapis.com/WorkflowTemplate\x12\x14\n\x07version\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01"\xad\x02\n"InstantiateWorkflowTemplateRequest\x12>\n\x04name\x18\x01 \x01(\tB0\xe0\x41\x02\xfa\x41*\n(dataproc.googleapis.com/WorkflowTemplate\x12\x14\n\x07version\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x17\n\nrequest_id\x18\x05 \x01(\tB\x03\xe0\x41\x01\x12\x65\n\nparameters\x18\x06 \x03(\x0b\x32L.google.cloud.dataproc.v1.InstantiateWorkflowTemplateRequest.ParametersEntryB\x03\xe0\x41\x01\x1a\x31\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\xc8\x01\n(InstantiateInlineWorkflowTemplateRequest\x12@\n\x06parent\x18\x01 \x01(\tB0\xe0\x41\x02\xfa\x41*\x12(dataproc.googleapis.com/WorkflowTemplate\x12\x41\n\x08template\x18\x02 \x01(\x0b\x32*.google.cloud.dataproc.v1.WorkflowTemplateB\x03\xe0\x41\x02\x12\x17\n\nrequest_id\x18\x03 \x01(\tB\x03\xe0\x41\x01"b\n\x1dUpdateWorkflowTemplateRequest\x12\x41\n\x08template\x18\x01 \x01(\x0b\x32*.google.cloud.dataproc.v1.WorkflowTemplateB\x03\xe0\x41\x02"\x91\x01\n\x1cListWorkflowTemplatesRequest\x12@\n\x06parent\x18\x01 \x01(\tB0\xe0\x41\x02\xfa\x41*\x12(dataproc.googleapis.com/WorkflowTemplate\x12\x16\n\tpage_size\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x17\n\npage_token\x18\x03 \x01(\tB\x03\xe0\x41\x01"\x81\x01\n\x1dListWorkflowTemplatesResponse\x12\x42\n\ttemplates\x18\x01 
\x03(\x0b\x32*.google.cloud.dataproc.v1.WorkflowTemplateB\x03\xe0\x41\x03\x12\x1c\n\x0fnext_page_token\x18\x02 \x01(\tB\x03\xe0\x41\x03"u\n\x1d\x44\x65leteWorkflowTemplateRequest\x12>\n\x04name\x18\x01 \x01(\tB0\xe0\x41\x02\xfa\x41*\n(dataproc.googleapis.com/WorkflowTemplate\x12\x14\n\x07version\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x32\xe6\x10\n\x17WorkflowTemplateService\x12\x9b\x02\n\x16\x43reateWorkflowTemplate\x12\x37.google.cloud.dataproc.v1.CreateWorkflowTemplateRequest\x1a*.google.cloud.dataproc.v1.WorkflowTemplate"\x9b\x01\x82\xd3\xe4\x93\x02\x82\x01"5/v1/{parent=projects/*/locations/*}/workflowTemplates:\x08templateZ?"3/v1/{parent=projects/*/regions/*}/workflowTemplates:\x08template\xda\x41\x0fparent,template\x12\xf4\x01\n\x13GetWorkflowTemplate\x12\x34.google.cloud.dataproc.v1.GetWorkflowTemplateRequest\x1a*.google.cloud.dataproc.v1.WorkflowTemplate"{\x82\xd3\xe4\x93\x02n\x12\x35/v1/{name=projects/*/locations/*/workflowTemplates/*}Z5\x12\x33/v1/{name=projects/*/regions/*/workflowTemplates/*}\xda\x41\x04name\x12\xd5\x02\n\x1bInstantiateWorkflowTemplate\x12<.google.cloud.dataproc.v1.InstantiateWorkflowTemplateRequest\x1a\x1d.google.longrunning.Operation"\xd8\x01\x82\xd3\xe4\x93\x02\x8c\x01"A/v1/{name=projects/*/locations/*/workflowTemplates/*}:instantiate:\x01*ZD"?/v1/{name=projects/*/regions/*/workflowTemplates/*}:instantiate:\x01*\xda\x41\x04name\xda\x41\x0fname,parameters\xca\x41)\n\x15google.protobuf.Empty\x12\x10WorkflowMetadata\x12\xf4\x02\n!InstantiateInlineWorkflowTemplate\x12\x42.google.cloud.dataproc.v1.InstantiateInlineWorkflowTemplateRequest\x1a\x1d.google.longrunning.Operation"\xeb\x01\x82\xd3\xe4\x93\x02\xa6\x01"G/v1/{parent=projects/*/locations/*}/workflowTemplates:instantiateInline:\x08templateZQ"E/v1/{parent=projects/*/regions/*}/workflowTemplates:instantiateInline:\x08template\xda\x41\x0fparent,template\xca\x41)\n\x15google.protobuf.Empty\x12\x10WorkflowMetadata\x12\xa6\x02\n\x16UpdateWorkflowTemplate\x12\x37.google.cloud.dataproc.v1.UpdateWorkflowTemplateRequest\x1a*.google.cloud.dataproc.v1.WorkflowTemplate"\xa6\x01\x82\xd3\xe4\x93\x02\x94\x01\x1a>/v1/{template.name=projects/*/locations/*/workflowTemplates/*}:\x08templateZH\x1a`__. Label **values** - may be empty, but, if present, must contain 1 to 63 - characters, and must conform to `RFC 1035 - `__. No more than 32 - labels can be associated with a template. - placement: - Required. WorkflowTemplate scheduling information. - jobs: - Required. The Directed Acyclic Graph of Jobs to submit. - parameters: - Optional. Template parameters whose values are substituted - into the template. Values for parameters must be provided when - the template is instantiated. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.WorkflowTemplate) - }, -) -_sym_db.RegisterMessage(WorkflowTemplate) -_sym_db.RegisterMessage(WorkflowTemplate.LabelsEntry) - -WorkflowTemplatePlacement = _reflection.GeneratedProtocolMessageType( - "WorkflowTemplatePlacement", - (_message.Message,), - { - "DESCRIPTOR": _WORKFLOWTEMPLATEPLACEMENT, - "__module__": "google.cloud.dataproc_v1.proto.workflow_templates_pb2", - "__doc__": """Specifies workflow execution target. Either ``managed_cluster`` or - ``cluster_selector`` is required. - - Attributes: - placement: - Required. Specifies where workflow executes; either on a - managed cluster or an existing cluster chosen by labels. - managed_cluster: - A cluster that is managed by the workflow. - cluster_selector: - Optional. 
A selector that chooses target cluster for jobs - based on metadata. The selector is evaluated at the time each - job is submitted. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.WorkflowTemplatePlacement) - }, -) -_sym_db.RegisterMessage(WorkflowTemplatePlacement) - -ManagedCluster = _reflection.GeneratedProtocolMessageType( - "ManagedCluster", - (_message.Message,), - { - "LabelsEntry": _reflection.GeneratedProtocolMessageType( - "LabelsEntry", - (_message.Message,), - { - "DESCRIPTOR": _MANAGEDCLUSTER_LABELSENTRY, - "__module__": "google.cloud.dataproc_v1.proto.workflow_templates_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ManagedCluster.LabelsEntry) - }, - ), - "DESCRIPTOR": _MANAGEDCLUSTER, - "__module__": "google.cloud.dataproc_v1.proto.workflow_templates_pb2", - "__doc__": """Cluster that is managed by the workflow. - - Attributes: - cluster_name: - Required. The cluster name prefix. A unique cluster name will - be formed by appending a random suffix. The name must contain - only lower-case letters (a-z), numbers (0-9), and hyphens (-). - Must begin with a letter. Cannot begin or end with hyphen. - Must consist of between 2 and 35 characters. - config: - Required. The cluster configuration. - labels: - Optional. The labels to associate with this cluster. Label - keys must be between 1 and 63 characters long. Label values must be between - 1 and 63 characters long. No more than 32 - labels can be associated with a given cluster. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ManagedCluster) - }, -) -_sym_db.RegisterMessage(ManagedCluster) -_sym_db.RegisterMessage(ManagedCluster.LabelsEntry) - -ClusterSelector = _reflection.GeneratedProtocolMessageType( - "ClusterSelector", - (_message.Message,), - { - "ClusterLabelsEntry": _reflection.GeneratedProtocolMessageType( - "ClusterLabelsEntry", - (_message.Message,), - { - "DESCRIPTOR": _CLUSTERSELECTOR_CLUSTERLABELSENTRY, - "__module__": "google.cloud.dataproc_v1.proto.workflow_templates_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ClusterSelector.ClusterLabelsEntry) - }, - ), - "DESCRIPTOR": _CLUSTERSELECTOR, - "__module__": "google.cloud.dataproc_v1.proto.workflow_templates_pb2", - "__doc__": """A selector that chooses target cluster for jobs based on metadata. - - Attributes: - zone: - Optional. The zone where workflow process executes. This - parameter does not affect the selection of the cluster. If - unspecified, the zone of the first cluster matching the - selector is used. - cluster_labels: - Required. The cluster labels. Cluster must have all labels to - match. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ClusterSelector) - }, -) -_sym_db.RegisterMessage(ClusterSelector) -_sym_db.RegisterMessage(ClusterSelector.ClusterLabelsEntry) - -OrderedJob = _reflection.GeneratedProtocolMessageType( - "OrderedJob", - (_message.Message,), - { - "LabelsEntry": _reflection.GeneratedProtocolMessageType( - "LabelsEntry", - (_message.Message,), - { - "DESCRIPTOR": _ORDEREDJOB_LABELSENTRY, - "__module__": "google.cloud.dataproc_v1.proto.workflow_templates_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.OrderedJob.LabelsEntry) - }, - ), - "DESCRIPTOR": _ORDEREDJOB, - "__module__": "google.cloud.dataproc_v1.proto.workflow_templates_pb2", - "__doc__": """A job executed by the workflow. - - Attributes: - step_id: - Required. The step id. The id must be unique among all jobs - within the template. 
The step id is used as prefix for job - id, as job ``goog-dataproc-workflow-step-id`` label, and in [p - rerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prereq - uisite_step_ids] field from other steps. The id must contain - only letters (a-z, A-Z), numbers (0-9), underscores (_), and - hyphens (-). Cannot begin or end with underscore or hyphen. - Must consist of between 3 and 50 characters. - job_type: - Required. The job definition. - hadoop_job: - Optional. Job is a Hadoop job. - spark_job: - Optional. Job is a Spark job. - pyspark_job: - Optional. Job is a PySpark job. - hive_job: - Optional. Job is a Hive job. - pig_job: - Optional. Job is a Pig job. - spark_r_job: - Optional. Job is a SparkR job. - spark_sql_job: - Optional. Job is a SparkSql job. - presto_job: - Optional. Job is a Presto job. - labels: - Optional. The labels to associate with this job. Label keys - must be between 1 and 63 characters long. Label values must be between - 1 and 63 characters long. No more than 32 labels can be - associated with a given job. - scheduling: - Optional. Job scheduling configuration. - prerequisite_step_ids: - Optional. The optional list of prerequisite job step_ids. If - not specified, the job will start at the beginning of - workflow. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.OrderedJob) - }, -) -_sym_db.RegisterMessage(OrderedJob) -_sym_db.RegisterMessage(OrderedJob.LabelsEntry) - -TemplateParameter = _reflection.GeneratedProtocolMessageType( - "TemplateParameter", - (_message.Message,), - { - "DESCRIPTOR": _TEMPLATEPARAMETER, - "__module__": "google.cloud.dataproc_v1.proto.workflow_templates_pb2", - "__doc__": """A configurable parameter that replaces one or more fields in the - template. Parameterizable fields: - Labels - File uris - Job - properties - Job arguments - Script variables - Main class (in - HadoopJob and SparkJob) - Zone (in ClusterSelector) - - Attributes: - name: - Required. Parameter name. The parameter name is used as the - key, and paired with the parameter value, which are passed to - the template when the template is instantiated. The name must - contain only capital letters (A-Z), numbers (0-9), and - underscores (_), and must not start with a number. The maximum - length is 40 characters. - fields: - Required. Paths to all fields that the parameter replaces. A - field is allowed to appear in at most one parameter’s list of - field paths. A field path is similar in syntax to a - [google.protobuf.FieldMask][google.protobuf.FieldMask]. For - example, a field path that references the zone field of a - workflow template’s cluster selector would be specified as - ``placement.clusterSelector.zone``. 
Also, field paths can - reference fields using the following syntax: - Values in - maps can be referenced by key: - labels[‘key’] - - placement.clusterSelector.clusterLabels[‘key’] - - placement.managedCluster.labels[‘key’] - - placement.clusterSelector.clusterLabels[‘key’] - - jobs[‘step-id’].labels[‘key’] - Jobs in the jobs list can be - referenced by step-id: - jobs[‘step- - id’].hadoopJob.mainJarFileUri - jobs[‘step- - id’].hiveJob.queryFileUri - jobs[‘step- - id’].pySparkJob.mainPythonFileUri - jobs[‘step- - id’].hadoopJob.jarFileUris[0] - jobs[‘step- - id’].hadoopJob.archiveUris[0] - jobs[‘step- - id’].hadoopJob.fileUris[0] - jobs[‘step- - id’].pySparkJob.pythonFileUris[0] - Items in repeated fields - can be referenced by a zero-based index: - jobs[‘step- - id’].sparkJob.args[0] - Other examples: - jobs[‘step- - id’].hadoopJob.properties[‘key’] - jobs[‘step- - id’].hadoopJob.args[0] - jobs[‘step- - id’].hiveJob.scriptVariables[‘key’] - jobs[‘step- - id’].hadoopJob.mainJarFileUri - - placement.clusterSelector.zone It may not be possible to - parameterize maps and repeated fields in their entirety since - only individual map values and individual items in repeated - fields can be referenced. For example, the following field - paths are invalid: - placement.clusterSelector.clusterLabels - - jobs[‘step-id’].sparkJob.args - description: - Optional. Brief description of the parameter. Must not exceed - 1024 characters. - validation: - Optional. Validation rules to be applied to this parameter’s - value. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.TemplateParameter) - }, -) -_sym_db.RegisterMessage(TemplateParameter) - -ParameterValidation = _reflection.GeneratedProtocolMessageType( - "ParameterValidation", - (_message.Message,), - { - "DESCRIPTOR": _PARAMETERVALIDATION, - "__module__": "google.cloud.dataproc_v1.proto.workflow_templates_pb2", - "__doc__": """Configuration for parameter validation. - - Attributes: - validation_type: - Required. The type of validation to be performed. - regex: - Validation based on regular expressions. - values: - Validation based on a list of allowed values. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ParameterValidation) - }, -) -_sym_db.RegisterMessage(ParameterValidation) - -RegexValidation = _reflection.GeneratedProtocolMessageType( - "RegexValidation", - (_message.Message,), - { - "DESCRIPTOR": _REGEXVALIDATION, - "__module__": "google.cloud.dataproc_v1.proto.workflow_templates_pb2", - "__doc__": """Validation based on regular expressions. - - Attributes: - regexes: - Required. RE2 regular expressions used to validate the - parameter’s value. The value must match the regex in its - entirety (substring matches are not sufficient). - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.RegexValidation) - }, -) -_sym_db.RegisterMessage(RegexValidation) - -ValueValidation = _reflection.GeneratedProtocolMessageType( - "ValueValidation", - (_message.Message,), - { - "DESCRIPTOR": _VALUEVALIDATION, - "__module__": "google.cloud.dataproc_v1.proto.workflow_templates_pb2", - "__doc__": """Validation based on a list of allowed values. - - Attributes: - values: - Required. List of allowed values for the parameter. 
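(Editor's aside, not part of the patch.) The deleted docstring above spells out the parameter field-path syntax and the two validation modes, so a short sketch may help. It assumes the proto-plus types added elsewhere in this PR are importable from google.cloud.dataproc_v1.types; the parameter name and zone pattern are made-up placeholders.

from google.cloud.dataproc_v1 import types

# Expose the cluster selector's zone as a template parameter named "ZONE",
# using the field-path syntax described above, and restrict the values that
# may be substituted at instantiation time with a regex validation.
zone_param = types.TemplateParameter(
    name="ZONE",
    fields=["placement.clusterSelector.zone"],
    validation=types.ParameterValidation(
        regex=types.RegexValidation(regexes=[r"us-central1-[a-f]"])
    ),
)
# The parameter would then be appended to a WorkflowTemplate's `parameters` list.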
- """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ValueValidation) - }, -) -_sym_db.RegisterMessage(ValueValidation) - -WorkflowMetadata = _reflection.GeneratedProtocolMessageType( - "WorkflowMetadata", - (_message.Message,), - { - "ParametersEntry": _reflection.GeneratedProtocolMessageType( - "ParametersEntry", - (_message.Message,), - { - "DESCRIPTOR": _WORKFLOWMETADATA_PARAMETERSENTRY, - "__module__": "google.cloud.dataproc_v1.proto.workflow_templates_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.WorkflowMetadata.ParametersEntry) - }, - ), - "DESCRIPTOR": _WORKFLOWMETADATA, - "__module__": "google.cloud.dataproc_v1.proto.workflow_templates_pb2", - "__doc__": """A Dataproc workflow template resource. - - Attributes: - template: - Output only. The resource name of the workflow template as - described in - https://cloud.google.com/apis/design/resource_names. - For - ``projects.regions.workflowTemplates``, the resource name of - the template has the following format: ``projects/{proje - ct_id}/regions/{region}/workflowTemplates/{template_id}`` - - For ``projects.locations.workflowTemplates``, the resource - name of the template has the following format: ``project - s/{project_id}/locations/{location}/workflowTemplates/{templat - e_id}`` - version: - Output only. The version of template at the time of workflow - instantiation. - create_cluster: - Output only. The create cluster operation metadata. - graph: - Output only. The workflow graph. - delete_cluster: - Output only. The delete cluster operation metadata. - state: - Output only. The workflow state. - cluster_name: - Output only. The name of the target cluster. - parameters: - Map from parameter names to values that were used for those - parameters. - start_time: - Output only. Workflow start time. - end_time: - Output only. Workflow end time. - cluster_uuid: - Output only. The UUID of target cluster. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.WorkflowMetadata) - }, -) -_sym_db.RegisterMessage(WorkflowMetadata) -_sym_db.RegisterMessage(WorkflowMetadata.ParametersEntry) - -ClusterOperation = _reflection.GeneratedProtocolMessageType( - "ClusterOperation", - (_message.Message,), - { - "DESCRIPTOR": _CLUSTEROPERATION, - "__module__": "google.cloud.dataproc_v1.proto.workflow_templates_pb2", - "__doc__": """The cluster operation triggered by a workflow. - - Attributes: - operation_id: - Output only. The id of the cluster operation. - error: - Output only. Error, if operation failed. - done: - Output only. Indicates the operation is done. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ClusterOperation) - }, -) -_sym_db.RegisterMessage(ClusterOperation) - -WorkflowGraph = _reflection.GeneratedProtocolMessageType( - "WorkflowGraph", - (_message.Message,), - { - "DESCRIPTOR": _WORKFLOWGRAPH, - "__module__": "google.cloud.dataproc_v1.proto.workflow_templates_pb2", - "__doc__": """The workflow graph. - - Attributes: - nodes: - Output only. The workflow nodes. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.WorkflowGraph) - }, -) -_sym_db.RegisterMessage(WorkflowGraph) - -WorkflowNode = _reflection.GeneratedProtocolMessageType( - "WorkflowNode", - (_message.Message,), - { - "DESCRIPTOR": _WORKFLOWNODE, - "__module__": "google.cloud.dataproc_v1.proto.workflow_templates_pb2", - "__doc__": """The workflow node. - - Attributes: - step_id: - Output only. The name of the node. - prerequisite_step_ids: - Output only. 
Node’s prerequisite nodes. - job_id: - Output only. The job id; populated after the node enters - RUNNING state. - state: - Output only. The node state. - error: - Output only. The error detail. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.WorkflowNode) - }, -) -_sym_db.RegisterMessage(WorkflowNode) - -CreateWorkflowTemplateRequest = _reflection.GeneratedProtocolMessageType( - "CreateWorkflowTemplateRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATEWORKFLOWTEMPLATEREQUEST, - "__module__": "google.cloud.dataproc_v1.proto.workflow_templates_pb2", - "__doc__": """A request to create a workflow template. - - Attributes: - parent: - Required. The resource name of the region or location, as - described in - https://cloud.google.com/apis/design/resource_names. - For - ``projects.regions.workflowTemplates,create``, the resource - name of the region has the following format: - ``projects/{project_id}/regions/{region}`` - For - ``projects.locations.workflowTemplates.create``, the resource - name of the location has the following format: - ``projects/{project_id}/locations/{location}`` - template: - Required. The Dataproc workflow template to create. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.CreateWorkflowTemplateRequest) - }, -) -_sym_db.RegisterMessage(CreateWorkflowTemplateRequest) - -GetWorkflowTemplateRequest = _reflection.GeneratedProtocolMessageType( - "GetWorkflowTemplateRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETWORKFLOWTEMPLATEREQUEST, - "__module__": "google.cloud.dataproc_v1.proto.workflow_templates_pb2", - "__doc__": """A request to fetch a workflow template. - - Attributes: - name: - Required. The resource name of the workflow template, as - described in - https://cloud.google.com/apis/design/resource_names. - For - ``projects.regions.workflowTemplates.get``, the resource name - of the template has the following format: ``projects/{pr - oject_id}/regions/{region}/workflowTemplates/{template_id}`` - - For ``projects.locations.workflowTemplates.get``, the - resource name of the template has the following format: - ``projects/{project_id}/locations/{location}/workflowTemplates - /{template_id}`` - version: - Optional. The version of workflow template to retrieve. Only - previously instantiated versions can be retrieved. If - unspecified, retrieves the current version. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.GetWorkflowTemplateRequest) - }, -) -_sym_db.RegisterMessage(GetWorkflowTemplateRequest) - -InstantiateWorkflowTemplateRequest = _reflection.GeneratedProtocolMessageType( - "InstantiateWorkflowTemplateRequest", - (_message.Message,), - { - "ParametersEntry": _reflection.GeneratedProtocolMessageType( - "ParametersEntry", - (_message.Message,), - { - "DESCRIPTOR": _INSTANTIATEWORKFLOWTEMPLATEREQUEST_PARAMETERSENTRY, - "__module__": "google.cloud.dataproc_v1.proto.workflow_templates_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.InstantiateWorkflowTemplateRequest.ParametersEntry) - }, - ), - "DESCRIPTOR": _INSTANTIATEWORKFLOWTEMPLATEREQUEST, - "__module__": "google.cloud.dataproc_v1.proto.workflow_templates_pb2", - "__doc__": """A request to instantiate a workflow template. - - Attributes: - name: - Required. The resource name of the workflow template, as - described in - https://cloud.google.com/apis/design/resource_names. 
- For - ``projects.regions.workflowTemplates.instantiate``, the - resource name of the template has the following format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{te - mplate_id}`` - For - ``projects.locations.workflowTemplates.instantiate``, the - resource name of the template has the following format: ``p - rojects/{project_id}/locations/{location}/workflowTemplates/{t - emplate_id}`` - version: - Optional. The version of workflow template to instantiate. If - specified, the workflow will be instantiated only if the - current version of the workflow template has the supplied - version. This option cannot be used to instantiate a previous - version of workflow template. - request_id: - Optional. A tag that prevents multiple concurrent workflow - instances with the same tag from running. This mitigates risk - of concurrent instances started due to retries. It is - recommended to always set this value to a `UUID `__. The tag - must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). The maximum length is 40 - characters. - parameters: - Optional. Map from parameter names to values that should be - used for those parameters. Values may not exceed 100 - characters. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.InstantiateWorkflowTemplateRequest) - }, -) -_sym_db.RegisterMessage(InstantiateWorkflowTemplateRequest) -_sym_db.RegisterMessage(InstantiateWorkflowTemplateRequest.ParametersEntry) - -InstantiateInlineWorkflowTemplateRequest = _reflection.GeneratedProtocolMessageType( - "InstantiateInlineWorkflowTemplateRequest", - (_message.Message,), - { - "DESCRIPTOR": _INSTANTIATEINLINEWORKFLOWTEMPLATEREQUEST, - "__module__": "google.cloud.dataproc_v1.proto.workflow_templates_pb2", - "__doc__": """A request to instantiate an inline workflow template. - - Attributes: - parent: - Required. The resource name of the region or location, as - described in - https://cloud.google.com/apis/design/resource_names. - For - ``projects.regions.workflowTemplates,instantiateinline``, the - resource name of the region has the following format: - ``projects/{project_id}/regions/{region}`` - For - ``projects.locations.workflowTemplates.instantiateinline``, - the resource name of the location has the following format: - ``projects/{project_id}/locations/{location}`` - template: - Required. The workflow template to instantiate. - request_id: - Optional. A tag that prevents multiple concurrent workflow - instances with the same tag from running. This mitigates risk - of concurrent instances started due to retries. It is - recommended to always set this value to a `UUID `__. The tag - must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). The maximum length is 40 - characters. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.InstantiateInlineWorkflowTemplateRequest) - }, -) -_sym_db.RegisterMessage(InstantiateInlineWorkflowTemplateRequest) - -UpdateWorkflowTemplateRequest = _reflection.GeneratedProtocolMessageType( - "UpdateWorkflowTemplateRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEWORKFLOWTEMPLATEREQUEST, - "__module__": "google.cloud.dataproc_v1.proto.workflow_templates_pb2", - "__doc__": """A request to update a workflow template. - - Attributes: - template: - Required. The updated workflow template. The - ``template.version`` field must match the current version. 
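(Editor's aside, not part of the patch.) Since the request docstrings above describe version, request_id, and parameters, here is a minimal, hedged sketch of instantiating a template through the generated client; the project, region, and template IDs are placeholders, and the regional endpoint override is an assumption about typical Dataproc usage.

import uuid

from google.cloud import dataproc_v1

client = dataproc_v1.WorkflowTemplateServiceClient(
    client_options={"api_endpoint": "us-central1-dataproc.googleapis.com:443"}
)
name = "projects/my-project/regions/us-central1/workflowTemplates/my-template"
# Passing a dict as the request lets us set request_id (an idempotency tag,
# ideally a UUID) alongside the parameter values substituted into the template.
operation = client.instantiate_workflow_template(
    request={
        "name": name,
        "parameters": {"ZONE": "us-central1-a"},
        "request_id": str(uuid.uuid4()),
    }
)
operation.result()  # long-running operation; its metadata carries WorkflowMetadata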
- """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.UpdateWorkflowTemplateRequest) - }, -) -_sym_db.RegisterMessage(UpdateWorkflowTemplateRequest) - -ListWorkflowTemplatesRequest = _reflection.GeneratedProtocolMessageType( - "ListWorkflowTemplatesRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTWORKFLOWTEMPLATESREQUEST, - "__module__": "google.cloud.dataproc_v1.proto.workflow_templates_pb2", - "__doc__": """A request to list workflow templates in a project. - - Attributes: - parent: - Required. The resource name of the region or location, as - described in - https://cloud.google.com/apis/design/resource_names. - For - ``projects.regions.workflowTemplates,list``, the resource name - of the region has the following format: - ``projects/{project_id}/regions/{region}`` - For - ``projects.locations.workflowTemplates.list``, the resource - name of the location has the following format: - ``projects/{project_id}/locations/{location}`` - page_size: - Optional. The maximum number of results to return in each - response. - page_token: - Optional. The page token, returned by a previous call, to - request the next page of results. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ListWorkflowTemplatesRequest) - }, -) -_sym_db.RegisterMessage(ListWorkflowTemplatesRequest) - -ListWorkflowTemplatesResponse = _reflection.GeneratedProtocolMessageType( - "ListWorkflowTemplatesResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTWORKFLOWTEMPLATESRESPONSE, - "__module__": "google.cloud.dataproc_v1.proto.workflow_templates_pb2", - "__doc__": """A response to a request to list workflow templates in a project. - - Attributes: - templates: - Output only. WorkflowTemplates list. - next_page_token: - Output only. This token is included in the response if there - are more results to fetch. To fetch additional results, - provide this value as the page_token in a subsequent - ListWorkflowTemplatesRequest. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.ListWorkflowTemplatesResponse) - }, -) -_sym_db.RegisterMessage(ListWorkflowTemplatesResponse) - -DeleteWorkflowTemplateRequest = _reflection.GeneratedProtocolMessageType( - "DeleteWorkflowTemplateRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETEWORKFLOWTEMPLATEREQUEST, - "__module__": "google.cloud.dataproc_v1.proto.workflow_templates_pb2", - "__doc__": """A request to delete a workflow template. Currently started workflows - will remain running. - - Attributes: - name: - Required. The resource name of the workflow template, as - described in - https://cloud.google.com/apis/design/resource_names. - For - ``projects.regions.workflowTemplates.delete``, the resource - name of the template has the following format: ``project - s/{project_id}/regions/{region}/workflowTemplates/{template_id - }`` - For - ``projects.locations.workflowTemplates.instantiate``, the - resource name of the template has the following format: ``p - rojects/{project_id}/locations/{location}/workflowTemplates/{t - emplate_id}`` - version: - Optional. The version of workflow template to delete. If - specified, will only delete the template if the current server - version matches specified version. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1.DeleteWorkflowTemplateRequest) - }, -) -_sym_db.RegisterMessage(DeleteWorkflowTemplateRequest) - - -DESCRIPTOR._options = None -_WORKFLOWTEMPLATE_LABELSENTRY._options = None -_WORKFLOWTEMPLATE.fields_by_name["id"]._options = None -_WORKFLOWTEMPLATE.fields_by_name["name"]._options = None -_WORKFLOWTEMPLATE.fields_by_name["version"]._options = None -_WORKFLOWTEMPLATE.fields_by_name["create_time"]._options = None -_WORKFLOWTEMPLATE.fields_by_name["update_time"]._options = None -_WORKFLOWTEMPLATE.fields_by_name["labels"]._options = None -_WORKFLOWTEMPLATE.fields_by_name["placement"]._options = None -_WORKFLOWTEMPLATE.fields_by_name["jobs"]._options = None -_WORKFLOWTEMPLATE.fields_by_name["parameters"]._options = None -_WORKFLOWTEMPLATE._options = None -_MANAGEDCLUSTER_LABELSENTRY._options = None -_MANAGEDCLUSTER.fields_by_name["cluster_name"]._options = None -_MANAGEDCLUSTER.fields_by_name["config"]._options = None -_MANAGEDCLUSTER.fields_by_name["labels"]._options = None -_CLUSTERSELECTOR_CLUSTERLABELSENTRY._options = None -_CLUSTERSELECTOR.fields_by_name["zone"]._options = None -_CLUSTERSELECTOR.fields_by_name["cluster_labels"]._options = None -_ORDEREDJOB_LABELSENTRY._options = None -_ORDEREDJOB.fields_by_name["step_id"]._options = None -_ORDEREDJOB.fields_by_name["hadoop_job"]._options = None -_ORDEREDJOB.fields_by_name["spark_job"]._options = None -_ORDEREDJOB.fields_by_name["pyspark_job"]._options = None -_ORDEREDJOB.fields_by_name["hive_job"]._options = None -_ORDEREDJOB.fields_by_name["pig_job"]._options = None -_ORDEREDJOB.fields_by_name["spark_r_job"]._options = None -_ORDEREDJOB.fields_by_name["spark_sql_job"]._options = None -_ORDEREDJOB.fields_by_name["presto_job"]._options = None -_ORDEREDJOB.fields_by_name["labels"]._options = None -_ORDEREDJOB.fields_by_name["scheduling"]._options = None -_ORDEREDJOB.fields_by_name["prerequisite_step_ids"]._options = None -_TEMPLATEPARAMETER.fields_by_name["name"]._options = None -_TEMPLATEPARAMETER.fields_by_name["fields"]._options = None -_TEMPLATEPARAMETER.fields_by_name["description"]._options = None -_TEMPLATEPARAMETER.fields_by_name["validation"]._options = None -_REGEXVALIDATION.fields_by_name["regexes"]._options = None -_VALUEVALIDATION.fields_by_name["values"]._options = None -_WORKFLOWMETADATA_PARAMETERSENTRY._options = None -_WORKFLOWMETADATA.fields_by_name["template"]._options = None -_WORKFLOWMETADATA.fields_by_name["version"]._options = None -_WORKFLOWMETADATA.fields_by_name["create_cluster"]._options = None -_WORKFLOWMETADATA.fields_by_name["graph"]._options = None -_WORKFLOWMETADATA.fields_by_name["delete_cluster"]._options = None -_WORKFLOWMETADATA.fields_by_name["state"]._options = None -_WORKFLOWMETADATA.fields_by_name["cluster_name"]._options = None -_WORKFLOWMETADATA.fields_by_name["start_time"]._options = None -_WORKFLOWMETADATA.fields_by_name["end_time"]._options = None -_WORKFLOWMETADATA.fields_by_name["cluster_uuid"]._options = None -_CLUSTEROPERATION.fields_by_name["operation_id"]._options = None -_CLUSTEROPERATION.fields_by_name["error"]._options = None -_CLUSTEROPERATION.fields_by_name["done"]._options = None -_WORKFLOWGRAPH.fields_by_name["nodes"]._options = None -_WORKFLOWNODE.fields_by_name["step_id"]._options = None -_WORKFLOWNODE.fields_by_name["prerequisite_step_ids"]._options = None -_WORKFLOWNODE.fields_by_name["job_id"]._options = None -_WORKFLOWNODE.fields_by_name["state"]._options = None 
-_WORKFLOWNODE.fields_by_name["error"]._options = None -_CREATEWORKFLOWTEMPLATEREQUEST.fields_by_name["parent"]._options = None -_CREATEWORKFLOWTEMPLATEREQUEST.fields_by_name["template"]._options = None -_GETWORKFLOWTEMPLATEREQUEST.fields_by_name["name"]._options = None -_GETWORKFLOWTEMPLATEREQUEST.fields_by_name["version"]._options = None -_INSTANTIATEWORKFLOWTEMPLATEREQUEST_PARAMETERSENTRY._options = None -_INSTANTIATEWORKFLOWTEMPLATEREQUEST.fields_by_name["name"]._options = None -_INSTANTIATEWORKFLOWTEMPLATEREQUEST.fields_by_name["version"]._options = None -_INSTANTIATEWORKFLOWTEMPLATEREQUEST.fields_by_name["request_id"]._options = None -_INSTANTIATEWORKFLOWTEMPLATEREQUEST.fields_by_name["parameters"]._options = None -_INSTANTIATEINLINEWORKFLOWTEMPLATEREQUEST.fields_by_name["parent"]._options = None -_INSTANTIATEINLINEWORKFLOWTEMPLATEREQUEST.fields_by_name["template"]._options = None -_INSTANTIATEINLINEWORKFLOWTEMPLATEREQUEST.fields_by_name["request_id"]._options = None -_UPDATEWORKFLOWTEMPLATEREQUEST.fields_by_name["template"]._options = None -_LISTWORKFLOWTEMPLATESREQUEST.fields_by_name["parent"]._options = None -_LISTWORKFLOWTEMPLATESREQUEST.fields_by_name["page_size"]._options = None -_LISTWORKFLOWTEMPLATESREQUEST.fields_by_name["page_token"]._options = None -_LISTWORKFLOWTEMPLATESRESPONSE.fields_by_name["templates"]._options = None -_LISTWORKFLOWTEMPLATESRESPONSE.fields_by_name["next_page_token"]._options = None -_DELETEWORKFLOWTEMPLATEREQUEST.fields_by_name["name"]._options = None -_DELETEWORKFLOWTEMPLATEREQUEST.fields_by_name["version"]._options = None - -_WORKFLOWTEMPLATESERVICE = _descriptor.ServiceDescriptor( - name="WorkflowTemplateService", - full_name="google.cloud.dataproc.v1.WorkflowTemplateService", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\027dataproc.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", - create_key=_descriptor._internal_create_key, - serialized_start=5325, - serialized_end=7475, - methods=[ - _descriptor.MethodDescriptor( - name="CreateWorkflowTemplate", - full_name="google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate", - index=0, - containing_service=None, - input_type=_CREATEWORKFLOWTEMPLATEREQUEST, - output_type=_WORKFLOWTEMPLATE, - serialized_options=b'\202\323\344\223\002\202\001"5/v1/{parent=projects/*/locations/*}/workflowTemplates:\010templateZ?"3/v1/{parent=projects/*/regions/*}/workflowTemplates:\010template\332A\017parent,template', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetWorkflowTemplate", - full_name="google.cloud.dataproc.v1.WorkflowTemplateService.GetWorkflowTemplate", - index=1, - containing_service=None, - input_type=_GETWORKFLOWTEMPLATEREQUEST, - output_type=_WORKFLOWTEMPLATE, - serialized_options=b"\202\323\344\223\002n\0225/v1/{name=projects/*/locations/*/workflowTemplates/*}Z5\0223/v1/{name=projects/*/regions/*/workflowTemplates/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="InstantiateWorkflowTemplate", - full_name="google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate", - index=2, - containing_service=None, - input_type=_INSTANTIATEWORKFLOWTEMPLATEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - 
serialized_options=b'\202\323\344\223\002\214\001"A/v1/{name=projects/*/locations/*/workflowTemplates/*}:instantiate:\001*ZD"?/v1/{name=projects/*/regions/*/workflowTemplates/*}:instantiate:\001*\332A\004name\332A\017name,parameters\312A)\n\025google.protobuf.Empty\022\020WorkflowMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="InstantiateInlineWorkflowTemplate", - full_name="google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateInlineWorkflowTemplate", - index=3, - containing_service=None, - input_type=_INSTANTIATEINLINEWORKFLOWTEMPLATEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\002\246\001"G/v1/{parent=projects/*/locations/*}/workflowTemplates:instantiateInline:\010templateZQ"E/v1/{parent=projects/*/regions/*}/workflowTemplates:instantiateInline:\010template\332A\017parent,template\312A)\n\025google.protobuf.Empty\022\020WorkflowMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="UpdateWorkflowTemplate", - full_name="google.cloud.dataproc.v1.WorkflowTemplateService.UpdateWorkflowTemplate", - index=4, - containing_service=None, - input_type=_UPDATEWORKFLOWTEMPLATEREQUEST, - output_type=_WORKFLOWTEMPLATE, - serialized_options=b"\202\323\344\223\002\224\001\032>/v1/{template.name=projects/*/locations/*/workflowTemplates/*}:\010templateZH\032 None: + """Instantiate the autoscaling policy service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.AutoscalingPolicyServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint, this is the default value for + the environment variable) and "auto" (auto switch to the default + mTLS endpoint if client SSL credentials is present). However, + the ``api_endpoint`` property takes precedence if provided. + (2) The ``client_cert_source`` property is used to provide client + SSL credentials for mutual TLS transport. If not provided, the + default SSL credentials will be used if present. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + + self._client = AutoscalingPolicyServiceClient( + credentials=credentials, transport=transport, client_options=client_options, + ) + + async def create_autoscaling_policy( + self, + request: autoscaling_policies.CreateAutoscalingPolicyRequest = None, + *, + parent: str = None, + policy: autoscaling_policies.AutoscalingPolicy = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> autoscaling_policies.AutoscalingPolicy: + r"""Creates new autoscaling policy. + + Args: + request (:class:`~.autoscaling_policies.CreateAutoscalingPolicyRequest`): + The request object. 
A request to create an autoscaling + policy. + parent (:class:`str`): + Required. The "resource name" of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.create``, + the resource name of the region has the following + format: ``projects/{project_id}/regions/{region}`` + + - For + ``projects.locations.autoscalingPolicies.create``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + policy (:class:`~.autoscaling_policies.AutoscalingPolicy`): + Required. The autoscaling policy to + create. + This corresponds to the ``policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.autoscaling_policies.AutoscalingPolicy: + Describes an autoscaling policy for + Dataproc cluster autoscaler. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent, policy]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = autoscaling_policies.CreateAutoscalingPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if policy is not None: + request.policy = policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_autoscaling_policy, + default_timeout=600.0, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def update_autoscaling_policy( + self, + request: autoscaling_policies.UpdateAutoscalingPolicyRequest = None, + *, + policy: autoscaling_policies.AutoscalingPolicy = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> autoscaling_policies.AutoscalingPolicy: + r"""Updates (replaces) autoscaling policy. + + Disabled check for update_mask, because all updates will be full + replacements. + + Args: + request (:class:`~.autoscaling_policies.UpdateAutoscalingPolicyRequest`): + The request object. A request to update an autoscaling + policy. + policy (:class:`~.autoscaling_policies.AutoscalingPolicy`): + Required. The updated autoscaling + policy. + This corresponds to the ``policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.autoscaling_policies.AutoscalingPolicy: + Describes an autoscaling policy for + Dataproc cluster autoscaler. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([policy]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = autoscaling_policies.UpdateAutoscalingPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if policy is not None: + request.policy = policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_autoscaling_policy, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("policy.name", request.policy.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_autoscaling_policy( + self, + request: autoscaling_policies.GetAutoscalingPolicyRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> autoscaling_policies.AutoscalingPolicy: + r"""Retrieves autoscaling policy. + + Args: + request (:class:`~.autoscaling_policies.GetAutoscalingPolicyRequest`): + The request object. A request to fetch an autoscaling + policy. + name (:class:`str`): + Required. The "resource name" of the autoscaling policy, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.get``, the + resource name of the policy has the following format: + ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` + + - For ``projects.locations.autoscalingPolicies.get``, + the resource name of the policy has the following + format: + ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.autoscaling_policies.AutoscalingPolicy: + Describes an autoscaling policy for + Dataproc cluster autoscaler. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = autoscaling_policies.GetAutoscalingPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_autoscaling_policy, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_autoscaling_policies( + self, + request: autoscaling_policies.ListAutoscalingPoliciesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListAutoscalingPoliciesAsyncPager: + r"""Lists autoscaling policies in the project. + + Args: + request (:class:`~.autoscaling_policies.ListAutoscalingPoliciesRequest`): + The request object. A request to list autoscaling + policies in a project. + parent (:class:`str`): + Required. The "resource name" of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.list``, + the resource name of the region has the following + format: ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.autoscalingPolicies.list``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListAutoscalingPoliciesAsyncPager: + A response to a request to list + autoscaling policies in a project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = autoscaling_policies.ListAutoscalingPoliciesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
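+        # The default retry configured below backs off exponentially
+        # (multiplier 1.3, at most 60s between attempts) on UNAVAILABLE and
+        # DEADLINE_EXCEEDED; a ``retry`` argument supplied by the caller at
+        # call time takes precedence over this default.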
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_autoscaling_policies, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListAutoscalingPoliciesAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_autoscaling_policy( + self, + request: autoscaling_policies.DeleteAutoscalingPolicyRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes an autoscaling policy. It is an error to + delete an autoscaling policy that is in use by one or + more clusters. + + Args: + request (:class:`~.autoscaling_policies.DeleteAutoscalingPolicyRequest`): + The request object. A request to delete an autoscaling + policy. + Autoscaling policies in use by one or more clusters will + not be deleted. + name (:class:`str`): + Required. The "resource name" of the autoscaling policy, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.delete``, + the resource name of the policy has the following + format: + ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` + + - For + ``projects.locations.autoscalingPolicies.delete``, + the resource name of the policy has the following + format: + ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = autoscaling_policies.DeleteAutoscalingPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_autoscaling_policy, + default_timeout=600.0, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
+ await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + +try: + _client_info = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-dataproc",).version, + ) +except pkg_resources.DistributionNotFound: + _client_info = gapic_v1.client_info.ClientInfo() + + +__all__ = ("AutoscalingPolicyServiceAsyncClient",) diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py new file mode 100644 index 00000000..8bf745b2 --- /dev/null +++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py @@ -0,0 +1,688 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +import os +import re +from typing import Callable, Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.dataproc_v1.services.autoscaling_policy_service import pagers +from google.cloud.dataproc_v1.types import autoscaling_policies + +from .transports.base import AutoscalingPolicyServiceTransport +from .transports.grpc import AutoscalingPolicyServiceGrpcTransport +from .transports.grpc_asyncio import AutoscalingPolicyServiceGrpcAsyncIOTransport + + +class AutoscalingPolicyServiceClientMeta(type): + """Metaclass for the AutoscalingPolicyService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[AutoscalingPolicyServiceTransport]] + _transport_registry["grpc"] = AutoscalingPolicyServiceGrpcTransport + _transport_registry["grpc_asyncio"] = AutoscalingPolicyServiceGrpcAsyncIOTransport + + def get_transport_class( + cls, label: str = None, + ) -> Type[AutoscalingPolicyServiceTransport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). 
+ return next(iter(cls._transport_registry.values())) + + +class AutoscalingPolicyServiceClient(metaclass=AutoscalingPolicyServiceClientMeta): + """The API interface for managing autoscaling policies in the + Dataproc API. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Convert api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "dataproc.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + {@api.name}: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @staticmethod + def autoscaling_policy_path( + project: str, location: str, autoscaling_policy: str, + ) -> str: + """Return a fully-qualified autoscaling_policy string.""" + return "projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}".format( + project=project, location=location, autoscaling_policy=autoscaling_policy, + ) + + @staticmethod + def parse_autoscaling_policy_path(path: str) -> Dict[str, str]: + """Parse a autoscaling_policy path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/autoscalingPolicies/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, AutoscalingPolicyServiceTransport] = None, + client_options: ClientOptions = None, + ) -> None: + """Instantiate the autoscaling policy service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.AutoscalingPolicyServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
GOOGLE_API_USE_MTLS + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint, this is the default value for + the environment variable) and "auto" (auto switch to the default + mTLS endpoint if client SSL credentials is present). However, + the ``api_endpoint`` property takes precedence if provided. + (2) The ``client_cert_source`` property is used to provide client + SSL credentials for mutual TLS transport. If not provided, the + default SSL credentials will be used if present. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = ClientOptions.from_dict(client_options) + if client_options is None: + client_options = ClientOptions.ClientOptions() + + if client_options.api_endpoint is None: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") + if use_mtls_env == "never": + client_options.api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + has_client_cert_source = ( + client_options.client_cert_source is not None + or mtls.has_default_client_cert_source() + ) + client_options.api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT + if has_client_cert_source + else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, AutoscalingPolicyServiceTransport): + # transport is a AutoscalingPolicyServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=client_options.api_endpoint, + scopes=client_options.scopes, + api_mtls_endpoint=client_options.api_endpoint, + client_cert_source=client_options.client_cert_source, + quota_project_id=client_options.quota_project_id, + ) + + def create_autoscaling_policy( + self, + request: autoscaling_policies.CreateAutoscalingPolicyRequest = None, + *, + parent: str = None, + policy: autoscaling_policies.AutoscalingPolicy = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> autoscaling_policies.AutoscalingPolicy: + r"""Creates new autoscaling policy. + + Args: + request (:class:`~.autoscaling_policies.CreateAutoscalingPolicyRequest`): + The request object. A request to create an autoscaling + policy. + parent (:class:`str`): + Required. The "resource name" of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. 
+ + - For ``projects.regions.autoscalingPolicies.create``, + the resource name of the region has the following + format: ``projects/{project_id}/regions/{region}`` + + - For + ``projects.locations.autoscalingPolicies.create``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + policy (:class:`~.autoscaling_policies.AutoscalingPolicy`): + Required. The autoscaling policy to + create. + This corresponds to the ``policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.autoscaling_policies.AutoscalingPolicy: + Describes an autoscaling policy for + Dataproc cluster autoscaler. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, policy]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a autoscaling_policies.CreateAutoscalingPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, autoscaling_policies.CreateAutoscalingPolicyRequest): + request = autoscaling_policies.CreateAutoscalingPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if policy is not None: + request.policy = policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.create_autoscaling_policy + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def update_autoscaling_policy( + self, + request: autoscaling_policies.UpdateAutoscalingPolicyRequest = None, + *, + policy: autoscaling_policies.AutoscalingPolicy = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> autoscaling_policies.AutoscalingPolicy: + r"""Updates (replaces) autoscaling policy. + + Disabled check for update_mask, because all updates will be full + replacements. + + Args: + request (:class:`~.autoscaling_policies.UpdateAutoscalingPolicyRequest`): + The request object. A request to update an autoscaling + policy. + policy (:class:`~.autoscaling_policies.AutoscalingPolicy`): + Required. The updated autoscaling + policy. + This corresponds to the ``policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.autoscaling_policies.AutoscalingPolicy: + Describes an autoscaling policy for + Dataproc cluster autoscaler. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([policy]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a autoscaling_policies.UpdateAutoscalingPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, autoscaling_policies.UpdateAutoscalingPolicyRequest): + request = autoscaling_policies.UpdateAutoscalingPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if policy is not None: + request.policy = policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.update_autoscaling_policy + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("policy.name", request.policy.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_autoscaling_policy( + self, + request: autoscaling_policies.GetAutoscalingPolicyRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> autoscaling_policies.AutoscalingPolicy: + r"""Retrieves autoscaling policy. + + Args: + request (:class:`~.autoscaling_policies.GetAutoscalingPolicyRequest`): + The request object. A request to fetch an autoscaling + policy. + name (:class:`str`): + Required. The "resource name" of the autoscaling policy, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.get``, the + resource name of the policy has the following format: + ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` + + - For ``projects.locations.autoscalingPolicies.get``, + the resource name of the policy has the following + format: + ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.autoscaling_policies.AutoscalingPolicy: + Describes an autoscaling policy for + Dataproc cluster autoscaler. + + """ + # Create or coerce a protobuf request object. 
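+        # Callers supply either a fully-formed request object or the flattened
+        # ``name`` argument, never both. Illustrative usage (placeholder name):
+        #   client.get_autoscaling_policy(
+        #       name="projects/my-project/regions/us-central1/autoscalingPolicies/my-policy"
+        #   )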
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a autoscaling_policies.GetAutoscalingPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, autoscaling_policies.GetAutoscalingPolicyRequest): + request = autoscaling_policies.GetAutoscalingPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_autoscaling_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_autoscaling_policies( + self, + request: autoscaling_policies.ListAutoscalingPoliciesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListAutoscalingPoliciesPager: + r"""Lists autoscaling policies in the project. + + Args: + request (:class:`~.autoscaling_policies.ListAutoscalingPoliciesRequest`): + The request object. A request to list autoscaling + policies in a project. + parent (:class:`str`): + Required. The "resource name" of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.list``, + the resource name of the region has the following + format: ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.autoscalingPolicies.list``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListAutoscalingPoliciesPager: + A response to a request to list + autoscaling policies in a project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a autoscaling_policies.ListAutoscalingPoliciesRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, autoscaling_policies.ListAutoscalingPoliciesRequest): + request = autoscaling_policies.ListAutoscalingPoliciesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.list_autoscaling_policies + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListAutoscalingPoliciesPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_autoscaling_policy( + self, + request: autoscaling_policies.DeleteAutoscalingPolicyRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes an autoscaling policy. It is an error to + delete an autoscaling policy that is in use by one or + more clusters. + + Args: + request (:class:`~.autoscaling_policies.DeleteAutoscalingPolicyRequest`): + The request object. A request to delete an autoscaling + policy. + Autoscaling policies in use by one or more clusters will + not be deleted. + name (:class:`str`): + Required. The "resource name" of the autoscaling policy, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.delete``, + the resource name of the policy has the following + format: + ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` + + - For + ``projects.locations.autoscalingPolicies.delete``, + the resource name of the policy has the following + format: + ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a autoscaling_policies.DeleteAutoscalingPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, autoscaling_policies.DeleteAutoscalingPolicyRequest): + request = autoscaling_policies.DeleteAutoscalingPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.delete_autoscaling_policy + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + +try: + _client_info = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-dataproc",).version, + ) +except pkg_resources.DistributionNotFound: + _client_info = gapic_v1.client_info.ClientInfo() + + +__all__ = ("AutoscalingPolicyServiceClient",) diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/pagers.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/pagers.py new file mode 100644 index 00000000..a246d08f --- /dev/null +++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/pagers.py @@ -0,0 +1,152 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple + +from google.cloud.dataproc_v1.types import autoscaling_policies + + +class ListAutoscalingPoliciesPager: + """A pager for iterating through ``list_autoscaling_policies`` requests. + + This class thinly wraps an initial + :class:`~.autoscaling_policies.ListAutoscalingPoliciesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``policies`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListAutoscalingPolicies`` requests and continue to iterate + through the ``policies`` field on the + corresponding responses. + + All the usual :class:`~.autoscaling_policies.ListAutoscalingPoliciesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., autoscaling_policies.ListAutoscalingPoliciesResponse], + request: autoscaling_policies.ListAutoscalingPoliciesRequest, + response: autoscaling_policies.ListAutoscalingPoliciesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.autoscaling_policies.ListAutoscalingPoliciesRequest`): + The initial request object. + response (:class:`~.autoscaling_policies.ListAutoscalingPoliciesResponse`): + The initial response object. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = autoscaling_policies.ListAutoscalingPoliciesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[autoscaling_policies.ListAutoscalingPoliciesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[autoscaling_policies.AutoscalingPolicy]: + for page in self.pages: + yield from page.policies + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListAutoscalingPoliciesAsyncPager: + """A pager for iterating through ``list_autoscaling_policies`` requests. + + This class thinly wraps an initial + :class:`~.autoscaling_policies.ListAutoscalingPoliciesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``policies`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListAutoscalingPolicies`` requests and continue to iterate + through the ``policies`` field on the + corresponding responses. + + All the usual :class:`~.autoscaling_policies.ListAutoscalingPoliciesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[autoscaling_policies.ListAutoscalingPoliciesResponse] + ], + request: autoscaling_policies.ListAutoscalingPoliciesRequest, + response: autoscaling_policies.ListAutoscalingPoliciesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.autoscaling_policies.ListAutoscalingPoliciesRequest`): + The initial request object. + response (:class:`~.autoscaling_policies.ListAutoscalingPoliciesResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = autoscaling_policies.ListAutoscalingPoliciesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[autoscaling_policies.ListAutoscalingPoliciesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[autoscaling_policies.AutoscalingPolicy]: + async def async_generator(): + async for page in self.pages: + for response in page.policies: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/__init__.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/__init__.py new file mode 100644 index 00000000..8a17b892 --- /dev/null +++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/__init__.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import AutoscalingPolicyServiceTransport +from .grpc import AutoscalingPolicyServiceGrpcTransport +from .grpc_asyncio import AutoscalingPolicyServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = ( + OrderedDict() +) # type: Dict[str, Type[AutoscalingPolicyServiceTransport]] +_transport_registry["grpc"] = AutoscalingPolicyServiceGrpcTransport +_transport_registry["grpc_asyncio"] = AutoscalingPolicyServiceGrpcAsyncIOTransport + + +__all__ = ( + "AutoscalingPolicyServiceTransport", + "AutoscalingPolicyServiceGrpcTransport", + "AutoscalingPolicyServiceGrpcAsyncIOTransport", +) diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py new file mode 100644 index 00000000..0c609a8b --- /dev/null +++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py @@ -0,0 +1,211 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import abc +import typing +import pkg_resources + +from google import auth +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.dataproc_v1.types import autoscaling_policies +from google.protobuf import empty_pb2 as empty # type: ignore + + +try: + _client_info = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-dataproc",).version, + ) +except pkg_resources.DistributionNotFound: + _client_info = gapic_v1.client_info.ClientInfo() + + +class AutoscalingPolicyServiceTransport(abc.ABC): + """Abstract transport class for AutoscalingPolicyService.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + def __init__( + self, + *, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scope (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=scopes, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = auth.default( + scopes=scopes, quota_project_id=quota_project_id + ) + + # Save the credentials. + self._credentials = credentials + + # Lifted into its own function so it can be stubbed out during tests. + self._prep_wrapped_messages() + + def _prep_wrapped_messages(self): + # Precompute the wrapped methods. 
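+        # Each entry pairs a transport method with its default retry policy
+        # and timeout; the synchronous client resolves RPCs through this
+        # mapping via ``self._transport._wrapped_methods[...]``, so the
+        # defaults below apply unless the caller overrides them per call.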
+ self._wrapped_methods = { + self.create_autoscaling_policy: gapic_v1.method.wrap_method( + self.create_autoscaling_policy, + default_timeout=600.0, + client_info=_client_info, + ), + self.update_autoscaling_policy: gapic_v1.method.wrap_method( + self.update_autoscaling_policy, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=_client_info, + ), + self.get_autoscaling_policy: gapic_v1.method.wrap_method( + self.get_autoscaling_policy, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=_client_info, + ), + self.list_autoscaling_policies: gapic_v1.method.wrap_method( + self.list_autoscaling_policies, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=_client_info, + ), + self.delete_autoscaling_policy: gapic_v1.method.wrap_method( + self.delete_autoscaling_policy, + default_timeout=600.0, + client_info=_client_info, + ), + } + + @property + def create_autoscaling_policy( + self, + ) -> typing.Callable[ + [autoscaling_policies.CreateAutoscalingPolicyRequest], + typing.Union[ + autoscaling_policies.AutoscalingPolicy, + typing.Awaitable[autoscaling_policies.AutoscalingPolicy], + ], + ]: + raise NotImplementedError() + + @property + def update_autoscaling_policy( + self, + ) -> typing.Callable[ + [autoscaling_policies.UpdateAutoscalingPolicyRequest], + typing.Union[ + autoscaling_policies.AutoscalingPolicy, + typing.Awaitable[autoscaling_policies.AutoscalingPolicy], + ], + ]: + raise NotImplementedError() + + @property + def get_autoscaling_policy( + self, + ) -> typing.Callable[ + [autoscaling_policies.GetAutoscalingPolicyRequest], + typing.Union[ + autoscaling_policies.AutoscalingPolicy, + typing.Awaitable[autoscaling_policies.AutoscalingPolicy], + ], + ]: + raise NotImplementedError() + + @property + def list_autoscaling_policies( + self, + ) -> typing.Callable[ + [autoscaling_policies.ListAutoscalingPoliciesRequest], + typing.Union[ + autoscaling_policies.ListAutoscalingPoliciesResponse, + typing.Awaitable[autoscaling_policies.ListAutoscalingPoliciesResponse], + ], + ]: + raise NotImplementedError() + + @property + def delete_autoscaling_policy( + self, + ) -> typing.Callable[ + [autoscaling_policies.DeleteAutoscalingPolicyRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + +__all__ = ("AutoscalingPolicyServiceTransport",) diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py new file mode 100644 index 00000000..d4fbfe1b --- /dev/null +++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py @@ -0,0 +1,356 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + + +import grpc # type: ignore + +from google.cloud.dataproc_v1.types import autoscaling_policies +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import AutoscalingPolicyServiceTransport + + +class AutoscalingPolicyServiceGrpcTransport(AutoscalingPolicyServiceTransport): + """gRPC backend transport for AutoscalingPolicyService. + + The API interface for managing autoscaling policies in the + Dataproc API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If + provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A + callback to provide client SSL certificate bytes and private key + bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` + is None. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. 
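+            # ``False`` (rather than ``None``) keeps the base transport from
+            # falling back to application default credentials, since the
+            # provided channel is assumed to carry its own.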
+ credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + self._stubs = {} # type: Dict[str, Callable] + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + @classmethod + def create_channel( + cls, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + address (Optionsl[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Sanity check: Only create a new channel if we do not already + # have one. + if not hasattr(self, "_grpc_channel"): + self._grpc_channel = self.create_channel( + self._host, credentials=self._credentials, + ) + + # Return the channel from cache. 
+ return self._grpc_channel + + @property + def create_autoscaling_policy( + self, + ) -> Callable[ + [autoscaling_policies.CreateAutoscalingPolicyRequest], + autoscaling_policies.AutoscalingPolicy, + ]: + r"""Return a callable for the create autoscaling policy method over gRPC. + + Creates new autoscaling policy. + + Returns: + Callable[[~.CreateAutoscalingPolicyRequest], + ~.AutoscalingPolicy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_autoscaling_policy" not in self._stubs: + self._stubs["create_autoscaling_policy"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.AutoscalingPolicyService/CreateAutoscalingPolicy", + request_serializer=autoscaling_policies.CreateAutoscalingPolicyRequest.serialize, + response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, + ) + return self._stubs["create_autoscaling_policy"] + + @property + def update_autoscaling_policy( + self, + ) -> Callable[ + [autoscaling_policies.UpdateAutoscalingPolicyRequest], + autoscaling_policies.AutoscalingPolicy, + ]: + r"""Return a callable for the update autoscaling policy method over gRPC. + + Updates (replaces) autoscaling policy. + + Disabled check for update_mask, because all updates will be full + replacements. + + Returns: + Callable[[~.UpdateAutoscalingPolicyRequest], + ~.AutoscalingPolicy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_autoscaling_policy" not in self._stubs: + self._stubs["update_autoscaling_policy"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.AutoscalingPolicyService/UpdateAutoscalingPolicy", + request_serializer=autoscaling_policies.UpdateAutoscalingPolicyRequest.serialize, + response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, + ) + return self._stubs["update_autoscaling_policy"] + + @property + def get_autoscaling_policy( + self, + ) -> Callable[ + [autoscaling_policies.GetAutoscalingPolicyRequest], + autoscaling_policies.AutoscalingPolicy, + ]: + r"""Return a callable for the get autoscaling policy method over gRPC. + + Retrieves autoscaling policy. + + Returns: + Callable[[~.GetAutoscalingPolicyRequest], + ~.AutoscalingPolicy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
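+        # The stub is created on first access and cached in ``self._stubs``,
+        # so repeated property lookups reuse the same gRPC callable.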
+ if "get_autoscaling_policy" not in self._stubs: + self._stubs["get_autoscaling_policy"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.AutoscalingPolicyService/GetAutoscalingPolicy", + request_serializer=autoscaling_policies.GetAutoscalingPolicyRequest.serialize, + response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, + ) + return self._stubs["get_autoscaling_policy"] + + @property + def list_autoscaling_policies( + self, + ) -> Callable[ + [autoscaling_policies.ListAutoscalingPoliciesRequest], + autoscaling_policies.ListAutoscalingPoliciesResponse, + ]: + r"""Return a callable for the list autoscaling policies method over gRPC. + + Lists autoscaling policies in the project. + + Returns: + Callable[[~.ListAutoscalingPoliciesRequest], + ~.ListAutoscalingPoliciesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_autoscaling_policies" not in self._stubs: + self._stubs["list_autoscaling_policies"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.AutoscalingPolicyService/ListAutoscalingPolicies", + request_serializer=autoscaling_policies.ListAutoscalingPoliciesRequest.serialize, + response_deserializer=autoscaling_policies.ListAutoscalingPoliciesResponse.deserialize, + ) + return self._stubs["list_autoscaling_policies"] + + @property + def delete_autoscaling_policy( + self, + ) -> Callable[[autoscaling_policies.DeleteAutoscalingPolicyRequest], empty.Empty]: + r"""Return a callable for the delete autoscaling policy method over gRPC. + + Deletes an autoscaling policy. It is an error to + delete an autoscaling policy that is in use by one or + more clusters. + + Returns: + Callable[[~.DeleteAutoscalingPolicyRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_autoscaling_policy" not in self._stubs: + self._stubs["delete_autoscaling_policy"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.AutoscalingPolicyService/DeleteAutoscalingPolicy", + request_serializer=autoscaling_policies.DeleteAutoscalingPolicyRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_autoscaling_policy"] + + +__all__ = ("AutoscalingPolicyServiceGrpcTransport",) diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py new file mode 100644 index 00000000..1eb47af4 --- /dev/null +++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py @@ -0,0 +1,351 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers_async # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.dataproc_v1.types import autoscaling_policies +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import AutoscalingPolicyServiceTransport +from .grpc import AutoscalingPolicyServiceGrpcTransport + + +class AutoscalingPolicyServiceGrpcAsyncIOTransport(AutoscalingPolicyServiceTransport): + """gRPC AsyncIO backend transport for AutoscalingPolicyService. + + The API interface for managing autoscaling policies in the + Dataproc API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + address (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If + provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A + callback to provide client SSL certificate bytes and private key + bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` + is None. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + self._stubs = {} + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Sanity check: Only create a new channel if we do not already + # have one. + if not hasattr(self, "_grpc_channel"): + self._grpc_channel = self.create_channel( + self._host, credentials=self._credentials, + ) + + # Return the channel from cache. + return self._grpc_channel + + @property + def create_autoscaling_policy( + self, + ) -> Callable[ + [autoscaling_policies.CreateAutoscalingPolicyRequest], + Awaitable[autoscaling_policies.AutoscalingPolicy], + ]: + r"""Return a callable for the create autoscaling policy method over gRPC. + + Creates new autoscaling policy. + + Returns: + Callable[[~.CreateAutoscalingPolicyRequest], + Awaitable[~.AutoscalingPolicy]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_autoscaling_policy" not in self._stubs: + self._stubs["create_autoscaling_policy"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.AutoscalingPolicyService/CreateAutoscalingPolicy", + request_serializer=autoscaling_policies.CreateAutoscalingPolicyRequest.serialize, + response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, + ) + return self._stubs["create_autoscaling_policy"] + + @property + def update_autoscaling_policy( + self, + ) -> Callable[ + [autoscaling_policies.UpdateAutoscalingPolicyRequest], + Awaitable[autoscaling_policies.AutoscalingPolicy], + ]: + r"""Return a callable for the update autoscaling policy method over gRPC. + + Updates (replaces) autoscaling policy. + + Disabled check for update_mask, because all updates will be full + replacements. + + Returns: + Callable[[~.UpdateAutoscalingPolicyRequest], + Awaitable[~.AutoscalingPolicy]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_autoscaling_policy" not in self._stubs: + self._stubs["update_autoscaling_policy"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.AutoscalingPolicyService/UpdateAutoscalingPolicy", + request_serializer=autoscaling_policies.UpdateAutoscalingPolicyRequest.serialize, + response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, + ) + return self._stubs["update_autoscaling_policy"] + + @property + def get_autoscaling_policy( + self, + ) -> Callable[ + [autoscaling_policies.GetAutoscalingPolicyRequest], + Awaitable[autoscaling_policies.AutoscalingPolicy], + ]: + r"""Return a callable for the get autoscaling policy method over gRPC. + + Retrieves autoscaling policy. + + Returns: + Callable[[~.GetAutoscalingPolicyRequest], + Awaitable[~.AutoscalingPolicy]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_autoscaling_policy" not in self._stubs: + self._stubs["get_autoscaling_policy"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.AutoscalingPolicyService/GetAutoscalingPolicy", + request_serializer=autoscaling_policies.GetAutoscalingPolicyRequest.serialize, + response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, + ) + return self._stubs["get_autoscaling_policy"] + + @property + def list_autoscaling_policies( + self, + ) -> Callable[ + [autoscaling_policies.ListAutoscalingPoliciesRequest], + Awaitable[autoscaling_policies.ListAutoscalingPoliciesResponse], + ]: + r"""Return a callable for the list autoscaling policies method over gRPC. + + Lists autoscaling policies in the project. + + Returns: + Callable[[~.ListAutoscalingPoliciesRequest], + Awaitable[~.ListAutoscalingPoliciesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_autoscaling_policies" not in self._stubs: + self._stubs["list_autoscaling_policies"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.AutoscalingPolicyService/ListAutoscalingPolicies", + request_serializer=autoscaling_policies.ListAutoscalingPoliciesRequest.serialize, + response_deserializer=autoscaling_policies.ListAutoscalingPoliciesResponse.deserialize, + ) + return self._stubs["list_autoscaling_policies"] + + @property + def delete_autoscaling_policy( + self, + ) -> Callable[ + [autoscaling_policies.DeleteAutoscalingPolicyRequest], Awaitable[empty.Empty] + ]: + r"""Return a callable for the delete autoscaling policy method over gRPC. + + Deletes an autoscaling policy. It is an error to + delete an autoscaling policy that is in use by one or + more clusters. + + Returns: + Callable[[~.DeleteAutoscalingPolicyRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_autoscaling_policy" not in self._stubs: + self._stubs["delete_autoscaling_policy"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.AutoscalingPolicyService/DeleteAutoscalingPolicy", + request_serializer=autoscaling_policies.DeleteAutoscalingPolicyRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_autoscaling_policy"] + + +__all__ = ("AutoscalingPolicyServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/dataproc_v1/services/cluster_controller/__init__.py b/google/cloud/dataproc_v1/services/cluster_controller/__init__.py new file mode 100644 index 00000000..99ce2997 --- /dev/null +++ b/google/cloud/dataproc_v1/services/cluster_controller/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .client import ClusterControllerClient +from .async_client import ClusterControllerAsyncClient + +__all__ = ( + "ClusterControllerClient", + "ClusterControllerAsyncClient", +) diff --git a/google/cloud/dataproc_v1/services/cluster_controller/async_client.py b/google/cloud/dataproc_v1/services/cluster_controller/async_client.py new file mode 100644 index 00000000..1ea1637c --- /dev/null +++ b/google/cloud/dataproc_v1/services/cluster_controller/async_client.py @@ -0,0 +1,836 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation +from google.api_core import operation_async +from google.cloud.dataproc_v1.services.cluster_controller import pagers +from google.cloud.dataproc_v1.types import clusters +from google.cloud.dataproc_v1.types import operations +from google.protobuf import empty_pb2 as empty # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore + +from .transports.base import ClusterControllerTransport +from .transports.grpc_asyncio import ClusterControllerGrpcAsyncIOTransport +from .client import ClusterControllerClient + + +class ClusterControllerAsyncClient: + """The ClusterControllerService provides methods to manage + clusters of Compute Engine instances. + """ + + _client: ClusterControllerClient + + DEFAULT_ENDPOINT = ClusterControllerClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = ClusterControllerClient.DEFAULT_MTLS_ENDPOINT + + from_service_account_file = ClusterControllerClient.from_service_account_file + from_service_account_json = from_service_account_file + + get_transport_class = functools.partial( + type(ClusterControllerClient).get_transport_class, type(ClusterControllerClient) + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, ClusterControllerTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + ) -> None: + """Instantiate the cluster controller client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.ClusterControllerTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint, this is the default value for + the environment variable) and "auto" (auto switch to the default + mTLS endpoint if client SSL credentials is present). However, + the ``api_endpoint`` property takes precedence if provided. + (2) The ``client_cert_source`` property is used to provide client + SSL credentials for mutual TLS transport. If not provided, the + default SSL credentials will be used if present. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + + self._client = ClusterControllerClient( + credentials=credentials, transport=transport, client_options=client_options, + ) + + async def create_cluster( + self, + request: clusters.CreateClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster: clusters.Cluster = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Args: + request (:class:`~.clusters.CreateClusterRequest`): + The request object. A request to create a cluster. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (:class:`~.clusters.Cluster`): + Required. The cluster to create. + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.clusters.Cluster``: Describes the identifying + information, config, and status of a cluster of Compute + Engine instances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, region, cluster]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = clusters.CreateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster is not None: + request.cluster = cluster + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_cluster, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=300.0, + client_info=_client_info, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + clusters.Cluster, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def update_cluster( + self, + request: clusters.UpdateClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + cluster: clusters.Cluster = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Args: + request (:class:`~.clusters.UpdateClusterRequest`): + The request object. A request to update a cluster. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project the cluster belongs to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (:class:`str`): + Required. The cluster name. + This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (:class:`~.clusters.Cluster`): + Required. The changes to the cluster. + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`~.field_mask.FieldMask`): + Required. Specifies the path, relative to ``Cluster``, + of the field to update. For example, to change the + number of workers in a cluster to 5, the ``update_mask`` + parameter would be specified as + ``config.worker_config.num_instances``, and the + ``PATCH`` request body would specify the new value, as + follows: + + :: + + { + "config":{ + "workerConfig":{ + "numInstances":"5" + } + } + } + + Similarly, to change the number of preemptible workers + in a cluster to 5, the ``update_mask`` parameter would + be ``config.secondary_worker_config.num_instances``, and + the ``PATCH`` request body would be set as follows: + + :: + + { + "config":{ + "secondaryWorkerConfig":{ + "numInstances":"5" + } + } + } + + Note: Currently, only the following fields can be + updated: + + .. raw:: html + + + + + + + + + + + + + + + + + + + + + + + +
+ Mask | Purpose
+ labels | Update labels
+ config.worker_config.num_instances | Resize primary worker group
+ config.secondary_worker_config.num_instances | Resize secondary worker group
+ config.autoscaling_config.policy_uri | Use, stop using, or change autoscaling policies
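The mask/purpose table above maps directly onto the flattened arguments of ``update_cluster``. A minimal sketch of resizing the primary worker group with the async client, assuming the flattened-argument calling style; the project, region, and cluster names are placeholders, not values from this patch:

    import asyncio

    from google.cloud.dataproc_v1.services.cluster_controller import ClusterControllerAsyncClient
    from google.cloud.dataproc_v1.types import clusters
    from google.protobuf import field_mask_pb2

    async def resize_workers():
        client = ClusterControllerAsyncClient()

        # Only the fields named in the update_mask are read from `cluster`.
        cluster = clusters.Cluster(
            config={"worker_config": {"num_instances": 5}}
        )
        mask = field_mask_pb2.FieldMask(
            paths=["config.worker_config.num_instances"]
        )

        operation = await client.update_cluster(
            project_id="my-project",    # placeholder
            region="us-central1",       # placeholder
            cluster_name="my-cluster",  # placeholder
            cluster=cluster,
            update_mask=mask,
        )
        # The call returns a long-running operation; await its result.
        updated = await operation.result()
        print(updated.cluster_name)

    asyncio.run(resize_workers())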
+ This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.clusters.Cluster``: Describes the identifying + information, config, and status of a cluster of Compute + Engine instances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any( + [project_id, region, cluster_name, cluster, update_mask] + ): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = clusters.UpdateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + if cluster is not None: + request.cluster = cluster + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_cluster, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=300.0, + client_info=_client_info, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + clusters.Cluster, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + async def delete_cluster( + self, + request: clusters.DeleteClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Args: + request (:class:`~.clusters.DeleteClusterRequest`): + The request object. A request to delete a cluster. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (:class:`str`): + Required. The cluster name. 
+ This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, region, cluster_name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = clusters.DeleteClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_cluster, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=300.0, + client_info=_client_info, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty.Empty, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_cluster( + self, + request: clusters.GetClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> clusters.Cluster: + r"""Gets the resource representation for a cluster in a + project. + + Args: + request (:class:`~.clusters.GetClusterRequest`): + The request object. Request to get the resource + representation for a cluster in a project. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (:class:`str`): + Required. The cluster name. 
+ This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.clusters.Cluster: + Describes the identifying + information, config, and status of a + cluster of Compute Engine instances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, region, cluster_name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = clusters.GetClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_cluster, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded, + ), + ), + default_timeout=300.0, + client_info=_client_info, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_clusters( + self, + request: clusters.ListClustersRequest = None, + *, + project_id: str = None, + region: str = None, + filter: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListClustersAsyncPager: + r"""Lists all regions/{region}/clusters in a project + alphabetically. + + Args: + request (:class:`~.clusters.ListClustersRequest`): + The request object. A request to list the clusters in a + project. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + filter (:class:`str`): + Optional. A filter constraining the clusters to list. + Filters are case-sensitive and have the following + syntax: + + field = value [AND [field = value]] ... + + where **field** is one of ``status.state``, + ``clusterName``, or ``labels.[KEY]``, and ``[KEY]`` is a + label key. **value** can be ``*`` to match all values. + ``status.state`` can be one of the following: + ``ACTIVE``, ``INACTIVE``, ``CREATING``, ``RUNNING``, + ``ERROR``, ``DELETING``, or ``UPDATING``. ``ACTIVE`` + contains the ``CREATING``, ``UPDATING``, and ``RUNNING`` + states. ``INACTIVE`` contains the ``DELETING`` and + ``ERROR`` states. 
``clusterName`` is the name of the + cluster provided at creation time. Only the logical + ``AND`` operator is supported; space-separated items are + treated as having an implicit ``AND`` operator. + + Example filter: + + status.state = ACTIVE AND clusterName = mycluster AND + labels.env = staging AND labels.starred = \* + This corresponds to the ``filter`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListClustersAsyncPager: + The list of all clusters in a + project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, region, filter]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = clusters.ListClustersRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if filter is not None: + request.filter = filter + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_clusters, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded, + ), + ), + default_timeout=300.0, + client_info=_client_info, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListClustersAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def diagnose_cluster( + self, + request: clusters.DiagnoseClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Gets cluster diagnostic information. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + After the operation completes, + [Operation.response][google.longrunning.Operation.response] + contains + `DiagnoseClusterResults `__. + + Args: + request (:class:`~.clusters.DiagnoseClusterRequest`): + The request object. A request to collect cluster + diagnostic information. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. 
+ This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (:class:`str`): + Required. The cluster name. + This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.clusters.DiagnoseClusterResults``: The + location of diagnostic output. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, region, cluster_name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = clusters.DiagnoseClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.diagnose_cluster, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=300.0, + client_info=_client_info, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + clusters.DiagnoseClusterResults, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + +try: + _client_info = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-dataproc",).version, + ) +except pkg_resources.DistributionNotFound: + _client_info = gapic_v1.client_info.ClientInfo() + + +__all__ = ("ClusterControllerAsyncClient",) diff --git a/google/cloud/dataproc_v1/services/cluster_controller/client.py b/google/cloud/dataproc_v1/services/cluster_controller/client.py new file mode 100644 index 00000000..7f895f39 --- /dev/null +++ b/google/cloud/dataproc_v1/services/cluster_controller/client.py @@ -0,0 +1,932 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +import os +import re +from typing import Callable, Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation +from google.api_core import operation_async +from google.cloud.dataproc_v1.services.cluster_controller import pagers +from google.cloud.dataproc_v1.types import clusters +from google.cloud.dataproc_v1.types import operations +from google.protobuf import empty_pb2 as empty # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore + +from .transports.base import ClusterControllerTransport +from .transports.grpc import ClusterControllerGrpcTransport +from .transports.grpc_asyncio import ClusterControllerGrpcAsyncIOTransport + + +class ClusterControllerClientMeta(type): + """Metaclass for the ClusterController client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[ClusterControllerTransport]] + _transport_registry["grpc"] = ClusterControllerGrpcTransport + _transport_registry["grpc_asyncio"] = ClusterControllerGrpcAsyncIOTransport + + def get_transport_class( + cls, label: str = None, + ) -> Type[ClusterControllerTransport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class ClusterControllerClient(metaclass=ClusterControllerClientMeta): + """The ClusterControllerService provides methods to manage + clusters of Compute Engine instances. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Convert api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "dataproc.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + {@api.name}: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, ClusterControllerTransport] = None, + client_options: ClientOptions = None, + ) -> None: + """Instantiate the cluster controller client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.ClusterControllerTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint, this is the default value for + the environment variable) and "auto" (auto switch to the default + mTLS endpoint if client SSL credentials is present). However, + the ``api_endpoint`` property takes precedence if provided. + (2) The ``client_cert_source`` property is used to provide client + SSL credentials for mutual TLS transport. If not provided, the + default SSL credentials will be used if present. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = ClientOptions.from_dict(client_options) + if client_options is None: + client_options = ClientOptions.ClientOptions() + + if client_options.api_endpoint is None: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") + if use_mtls_env == "never": + client_options.api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + has_client_cert_source = ( + client_options.client_cert_source is not None + or mtls.has_default_client_cert_source() + ) + client_options.api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT + if has_client_cert_source + else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS value. 
Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, ClusterControllerTransport): + # transport is a ClusterControllerTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=client_options.api_endpoint, + scopes=client_options.scopes, + api_mtls_endpoint=client_options.api_endpoint, + client_cert_source=client_options.client_cert_source, + quota_project_id=client_options.quota_project_id, + ) + + def create_cluster( + self, + request: clusters.CreateClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster: clusters.Cluster = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Creates a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Args: + request (:class:`~.clusters.CreateClusterRequest`): + The request object. A request to create a cluster. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (:class:`~.clusters.Cluster`): + Required. The cluster to create. + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.clusters.Cluster``: Describes the identifying + information, config, and status of a cluster of Compute + Engine instances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, cluster]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a clusters.CreateClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, clusters.CreateClusterRequest): + request = clusters.CreateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster is not None: + request.cluster = cluster + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_cluster] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + clusters.Cluster, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + def update_cluster( + self, + request: clusters.UpdateClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + cluster: clusters.Cluster = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Updates a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Args: + request (:class:`~.clusters.UpdateClusterRequest`): + The request object. A request to update a cluster. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project the cluster belongs to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (:class:`str`): + Required. The cluster name. + This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (:class:`~.clusters.Cluster`): + Required. The changes to the cluster. + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`~.field_mask.FieldMask`): + Required. Specifies the path, relative to ``Cluster``, + of the field to update. For example, to change the + number of workers in a cluster to 5, the ``update_mask`` + parameter would be specified as + ``config.worker_config.num_instances``, and the + ``PATCH`` request body would specify the new value, as + follows: + + :: + + { + "config":{ + "workerConfig":{ + "numInstances":"5" + } + } + } + + Similarly, to change the number of preemptible workers + in a cluster to 5, the ``update_mask`` parameter would + be ``config.secondary_worker_config.num_instances``, and + the ``PATCH`` request body would be set as follows: + + :: + + { + "config":{ + "secondaryWorkerConfig":{ + "numInstances":"5" + } + } + } + + Note: Currently, only the following fields can be + updated: + + .. raw:: html + + + + + + + + + + + + + + + + + + + + + + + +
+ Mask | Purpose
+ labels | Update labels
+ config.worker_config.num_instances | Resize primary worker group
+ config.secondary_worker_config.num_instances | Resize secondary worker group
+ config.autoscaling_config.policy_uri | Use, stop using, or change autoscaling policies
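With the blocking client, the same update can be expressed through a fully-formed request object instead of the flattened fields; as the surrounding checks note, the two styles are mutually exclusive. A rough sketch, with placeholder identifiers:

    from google.cloud.dataproc_v1.services.cluster_controller import ClusterControllerClient
    from google.cloud.dataproc_v1.types import clusters
    from google.protobuf import field_mask_pb2

    client = ClusterControllerClient()

    # Request-object style: do not also pass project_id/region/etc. as kwargs.
    request = clusters.UpdateClusterRequest(
        project_id="my-project",    # placeholder
        region="us-central1",       # placeholder
        cluster_name="my-cluster",  # placeholder
        cluster=clusters.Cluster(
            config={"secondary_worker_config": {"num_instances": 5}}
        ),
        update_mask=field_mask_pb2.FieldMask(
            paths=["config.secondary_worker_config.num_instances"]
        ),
    )

    operation = client.update_cluster(request=request)
    cluster = operation.result()  # blocks until the LRO completes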
+ This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.clusters.Cluster``: Describes the identifying + information, config, and status of a cluster of Compute + Engine instances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [project_id, region, cluster_name, cluster, update_mask] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a clusters.UpdateClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, clusters.UpdateClusterRequest): + request = clusters.UpdateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + if cluster is not None: + request.cluster = cluster + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_cluster] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + clusters.Cluster, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + def delete_cluster( + self, + request: clusters.DeleteClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Deletes a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Args: + request (:class:`~.clusters.DeleteClusterRequest`): + The request object. A request to delete a cluster. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (:class:`str`): + Required. The cluster name. 
+ This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, cluster_name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a clusters.DeleteClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, clusters.DeleteClusterRequest): + request = clusters.DeleteClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_cluster] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty.Empty, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + def get_cluster( + self, + request: clusters.GetClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> clusters.Cluster: + r"""Gets the resource representation for a cluster in a + project. + + Args: + request (:class:`~.clusters.GetClusterRequest`): + The request object. Request to get the resource + representation for a cluster in a project. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (:class:`str`): + Required. The cluster name. 
+ This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.clusters.Cluster: + Describes the identifying + information, config, and status of a + cluster of Compute Engine instances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, cluster_name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a clusters.GetClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, clusters.GetClusterRequest): + request = clusters.GetClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_cluster] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_clusters( + self, + request: clusters.ListClustersRequest = None, + *, + project_id: str = None, + region: str = None, + filter: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListClustersPager: + r"""Lists all regions/{region}/clusters in a project + alphabetically. + + Args: + request (:class:`~.clusters.ListClustersRequest`): + The request object. A request to list the clusters in a + project. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + filter (:class:`str`): + Optional. A filter constraining the clusters to list. + Filters are case-sensitive and have the following + syntax: + + field = value [AND [field = value]] ... + + where **field** is one of ``status.state``, + ``clusterName``, or ``labels.[KEY]``, and ``[KEY]`` is a + label key. **value** can be ``*`` to match all values. + ``status.state`` can be one of the following: + ``ACTIVE``, ``INACTIVE``, ``CREATING``, ``RUNNING``, + ``ERROR``, ``DELETING``, or ``UPDATING``. ``ACTIVE`` + contains the ``CREATING``, ``UPDATING``, and ``RUNNING`` + states. ``INACTIVE`` contains the ``DELETING`` and + ``ERROR`` states. 
``clusterName`` is the name of the + cluster provided at creation time. Only the logical + ``AND`` operator is supported; space-separated items are + treated as having an implicit ``AND`` operator. + + Example filter: + + status.state = ACTIVE AND clusterName = mycluster AND + labels.env = staging AND labels.starred = \* + This corresponds to the ``filter`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListClustersPager: + The list of all clusters in a + project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, filter]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a clusters.ListClustersRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, clusters.ListClustersRequest): + request = clusters.ListClustersRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if filter is not None: + request.filter = filter + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_clusters] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListClustersPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def diagnose_cluster( + self, + request: clusters.DiagnoseClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Gets cluster diagnostic information. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + After the operation completes, + [Operation.response][google.longrunning.Operation.response] + contains + `DiagnoseClusterResults `__. + + Args: + request (:class:`~.clusters.DiagnoseClusterRequest`): + The request object. A request to collect cluster + diagnostic information. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. 
+ This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (:class:`str`): + Required. The cluster name. + This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.clusters.DiagnoseClusterResults``: The + location of diagnostic output. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, cluster_name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a clusters.DiagnoseClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, clusters.DiagnoseClusterRequest): + request = clusters.DiagnoseClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.diagnose_cluster] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + clusters.DiagnoseClusterResults, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + +try: + _client_info = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-dataproc",).version, + ) +except pkg_resources.DistributionNotFound: + _client_info = gapic_v1.client_info.ClientInfo() + + +__all__ = ("ClusterControllerClient",) diff --git a/google/cloud/dataproc_v1/services/cluster_controller/pagers.py b/google/cloud/dataproc_v1/services/cluster_controller/pagers.py new file mode 100644 index 00000000..c5f0fbdf --- /dev/null +++ b/google/cloud/dataproc_v1/services/cluster_controller/pagers.py @@ -0,0 +1,148 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
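For reference, a minimal usage sketch of the synchronous ClusterControllerClient generated above. The project, region, cluster, and bucket values are placeholders; the regional ``client_options`` endpoint and the dict/FieldMask coercions follow the usual microgen conventions rather than anything shown verbatim in this diff::

    from google.cloud import dataproc_v1
    from google.protobuf import field_mask_pb2

    # Dataproc cluster RPCs are typically served from a regional endpoint.
    client = dataproc_v1.ClusterControllerClient(
        client_options={"api_endpoint": "us-central1-dataproc.googleapis.com:443"}
    )

    # List clusters using the filter syntax documented on list_clusters.
    for cluster in client.list_clusters(
        project_id="my-project",
        region="us-central1",
        filter="status.state = ACTIVE AND labels.env = staging",
    ):
        print(cluster.cluster_name)

    # update_cluster returns a long-running operation future; block until done.
    operation = client.update_cluster(
        project_id="my-project",
        region="us-central1",
        cluster_name="my-cluster",
        cluster={"config": {"worker_config": {"num_instances": 5}}},
        update_mask=field_mask_pb2.FieldMask(
            paths=["config.worker_config.num_instances"]
        ),
    )
    updated = operation.result()
    print(updated.cluster_name)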
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple + +from google.cloud.dataproc_v1.types import clusters + + +class ListClustersPager: + """A pager for iterating through ``list_clusters`` requests. + + This class thinly wraps an initial + :class:`~.clusters.ListClustersResponse` object, and + provides an ``__iter__`` method to iterate through its + ``clusters`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListClusters`` requests and continue to iterate + through the ``clusters`` field on the + corresponding responses. + + All the usual :class:`~.clusters.ListClustersResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., clusters.ListClustersResponse], + request: clusters.ListClustersRequest, + response: clusters.ListClustersResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.clusters.ListClustersRequest`): + The initial request object. + response (:class:`~.clusters.ListClustersResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = clusters.ListClustersRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[clusters.ListClustersResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[clusters.Cluster]: + for page in self.pages: + yield from page.clusters + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListClustersAsyncPager: + """A pager for iterating through ``list_clusters`` requests. + + This class thinly wraps an initial + :class:`~.clusters.ListClustersResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``clusters`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListClusters`` requests and continue to iterate + through the ``clusters`` field on the + corresponding responses. + + All the usual :class:`~.clusters.ListClustersResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[clusters.ListClustersResponse]], + request: clusters.ListClustersRequest, + response: clusters.ListClustersResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.clusters.ListClustersRequest`): + The initial request object. + response (:class:`~.clusters.ListClustersResponse`): + The initial response object. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = clusters.ListClustersRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[clusters.ListClustersResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[clusters.Cluster]: + async def async_generator(): + async for page in self.pages: + for response in page.clusters: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/dataproc_v1/services/cluster_controller/transports/__init__.py b/google/cloud/dataproc_v1/services/cluster_controller/transports/__init__.py new file mode 100644 index 00000000..9aa597b6 --- /dev/null +++ b/google/cloud/dataproc_v1/services/cluster_controller/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import ClusterControllerTransport +from .grpc import ClusterControllerGrpcTransport +from .grpc_asyncio import ClusterControllerGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[ClusterControllerTransport]] +_transport_registry["grpc"] = ClusterControllerGrpcTransport +_transport_registry["grpc_asyncio"] = ClusterControllerGrpcAsyncIOTransport + + +__all__ = ( + "ClusterControllerTransport", + "ClusterControllerGrpcTransport", + "ClusterControllerGrpcAsyncIOTransport", +) diff --git a/google/cloud/dataproc_v1/services/cluster_controller/transports/base.py b/google/cloud/dataproc_v1/services/cluster_controller/transports/base.py new file mode 100644 index 00000000..993de639 --- /dev/null +++ b/google/cloud/dataproc_v1/services/cluster_controller/transports/base.py @@ -0,0 +1,242 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
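A short sketch of how the pagers above are consumed; ``client`` and ``async_client`` stand in for already-constructed ClusterController clients (the async client itself is defined elsewhere in this change)::

    # Synchronous pager: iterating it yields clusters.Cluster items and fetches
    # follow-up pages automatically via next_page_token.
    for cluster in client.list_clusters(project_id="my-project", region="us-central1"):
        print(cluster.cluster_name)

    # Async pager: the same surface, exposed through __aiter__ on the awaited result.
    async def show_clusters(async_client):
        pager = await async_client.list_clusters(
            project_id="my-project", region="us-central1"
        )
        async for cluster in pager:
            print(cluster.cluster_name)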
+# + +import abc +import typing +import pkg_resources + +from google import auth +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.dataproc_v1.types import clusters +from google.longrunning import operations_pb2 as operations # type: ignore + + +try: + _client_info = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-dataproc",).version, + ) +except pkg_resources.DistributionNotFound: + _client_info = gapic_v1.client_info.ClientInfo() + + +class ClusterControllerTransport(abc.ABC): + """Abstract transport class for ClusterController.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + def __init__( + self, + *, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scope (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=scopes, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = auth.default( + scopes=scopes, quota_project_id=quota_project_id + ) + + # Save the credentials. + self._credentials = credentials + + # Lifted into its own function so it can be stubbed out during tests. + self._prep_wrapped_messages() + + def _prep_wrapped_messages(self): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.create_cluster: gapic_v1.method.wrap_method( + self.create_cluster, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=300.0, + client_info=_client_info, + ), + self.update_cluster: gapic_v1.method.wrap_method( + self.update_cluster, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=300.0, + client_info=_client_info, + ), + self.delete_cluster: gapic_v1.method.wrap_method( + self.delete_cluster, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=300.0, + client_info=_client_info, + ), + self.get_cluster: gapic_v1.method.wrap_method( + self.get_cluster, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded, + ), + ), + default_timeout=300.0, + client_info=_client_info, + ), + self.list_clusters: gapic_v1.method.wrap_method( + self.list_clusters, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded, + ), + ), + default_timeout=300.0, + client_info=_client_info, + ), + self.diagnose_cluster: gapic_v1.method.wrap_method( + self.diagnose_cluster, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=300.0, + client_info=_client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_cluster( + self, + ) -> typing.Callable[ + [clusters.CreateClusterRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def update_cluster( + self, + ) -> typing.Callable[ + [clusters.UpdateClusterRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def delete_cluster( + self, + ) -> typing.Callable[ + [clusters.DeleteClusterRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def get_cluster( + self, + ) -> typing.Callable[ + [clusters.GetClusterRequest], + typing.Union[clusters.Cluster, typing.Awaitable[clusters.Cluster]], + ]: + raise NotImplementedError() + + @property + def list_clusters( + self, + ) -> typing.Callable[ + [clusters.ListClustersRequest], + typing.Union[ + clusters.ListClustersResponse, + typing.Awaitable[clusters.ListClustersResponse], + ], + ]: + raise NotImplementedError() + + @property + def diagnose_cluster( + self, + ) -> typing.Callable[ + [clusters.DiagnoseClusterRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + +__all__ = ("ClusterControllerTransport",) diff --git a/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py 
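The retry and timeout defaults wired up in ``_prep_wrapped_messages`` can be overridden per call, since every generated method accepts ``retry`` and ``timeout`` arguments. A sketch, assuming an already-constructed client, that mirrors the backoff configured above::

    from google.api_core import exceptions
    from google.api_core import retry as retries

    # Mirrors the exponential backoff used by the wrapped methods above.
    custom_retry = retries.Retry(
        initial=0.1,      # first delay in seconds
        maximum=60.0,     # ceiling on the delay between attempts
        multiplier=1.3,   # exponential backoff factor
        predicate=retries.if_exception_type(
            exceptions.InternalServerError,
            exceptions.ServiceUnavailable,
            exceptions.DeadlineExceeded,
        ),
    )

    cluster = client.get_cluster(
        project_id="my-project",
        region="us-central1",
        cluster_name="my-cluster",
        retry=custom_retry,
        timeout=300.0,
    )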
b/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py new file mode 100644 index 00000000..46f0b416 --- /dev/null +++ b/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py @@ -0,0 +1,398 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + + +import grpc # type: ignore + +from google.cloud.dataproc_v1.types import clusters +from google.longrunning import operations_pb2 as operations # type: ignore + +from .base import ClusterControllerTransport + + +class ClusterControllerGrpcTransport(ClusterControllerTransport): + """gRPC backend transport for ClusterController. + + The ClusterControllerService provides methods to manage + clusters of Compute Engine instances. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If + provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A + callback to provide client SSL certificate bytes and private key + bytes, both in PEM format. 
It is ignored if ``api_mtls_endpoint`` + is None. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + self._stubs = {} # type: Dict[str, Callable] + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + @classmethod + def create_channel( + cls, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + address (Optionsl[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. 
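A sketch of supplying a pre-built channel via ``create_channel`` instead of letting the transport build one; the regional hostname is a placeholder, and the sync client is assumed to accept a ``transport`` instance in the same way the async clients in this change do::

    from google.cloud import dataproc_v1
    from google.cloud.dataproc_v1.services.cluster_controller.transports import (
        ClusterControllerGrpcTransport,
    )

    # Build the channel explicitly (for example, against a regional endpoint) and
    # hand it to the transport; the __init__ above then skips channel creation.
    channel = ClusterControllerGrpcTransport.create_channel(
        host="us-central1-dataproc.googleapis.com:443",
    )
    transport = ClusterControllerGrpcTransport(channel=channel)
    client = dataproc_v1.ClusterControllerClient(transport=transport)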
+ """ + # Sanity check: Only create a new channel if we do not already + # have one. + if not hasattr(self, "_grpc_channel"): + self._grpc_channel = self.create_channel( + self._host, credentials=self._credentials, + ) + + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if "operations_client" not in self.__dict__: + self.__dict__["operations_client"] = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self.__dict__["operations_client"] + + @property + def create_cluster( + self, + ) -> Callable[[clusters.CreateClusterRequest], operations.Operation]: + r"""Return a callable for the create cluster method over gRPC. + + Creates a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Returns: + Callable[[~.CreateClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_cluster" not in self._stubs: + self._stubs["create_cluster"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.ClusterController/CreateCluster", + request_serializer=clusters.CreateClusterRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_cluster"] + + @property + def update_cluster( + self, + ) -> Callable[[clusters.UpdateClusterRequest], operations.Operation]: + r"""Return a callable for the update cluster method over gRPC. + + Updates a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Returns: + Callable[[~.UpdateClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_cluster" not in self._stubs: + self._stubs["update_cluster"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.ClusterController/UpdateCluster", + request_serializer=clusters.UpdateClusterRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["update_cluster"] + + @property + def delete_cluster( + self, + ) -> Callable[[clusters.DeleteClusterRequest], operations.Operation]: + r"""Return a callable for the delete cluster method over gRPC. + + Deletes a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Returns: + Callable[[~.DeleteClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_cluster" not in self._stubs: + self._stubs["delete_cluster"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.ClusterController/DeleteCluster", + request_serializer=clusters.DeleteClusterRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_cluster"] + + @property + def get_cluster(self) -> Callable[[clusters.GetClusterRequest], clusters.Cluster]: + r"""Return a callable for the get cluster method over gRPC. + + Gets the resource representation for a cluster in a + project. + + Returns: + Callable[[~.GetClusterRequest], + ~.Cluster]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_cluster" not in self._stubs: + self._stubs["get_cluster"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.ClusterController/GetCluster", + request_serializer=clusters.GetClusterRequest.serialize, + response_deserializer=clusters.Cluster.deserialize, + ) + return self._stubs["get_cluster"] + + @property + def list_clusters( + self, + ) -> Callable[[clusters.ListClustersRequest], clusters.ListClustersResponse]: + r"""Return a callable for the list clusters method over gRPC. + + Lists all regions/{region}/clusters in a project + alphabetically. + + Returns: + Callable[[~.ListClustersRequest], + ~.ListClustersResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_clusters" not in self._stubs: + self._stubs["list_clusters"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.ClusterController/ListClusters", + request_serializer=clusters.ListClustersRequest.serialize, + response_deserializer=clusters.ListClustersResponse.deserialize, + ) + return self._stubs["list_clusters"] + + @property + def diagnose_cluster( + self, + ) -> Callable[[clusters.DiagnoseClusterRequest], operations.Operation]: + r"""Return a callable for the diagnose cluster method over gRPC. + + Gets cluster diagnostic information. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + After the operation completes, + [Operation.response][google.longrunning.Operation.response] + contains + `DiagnoseClusterResults `__. + + Returns: + Callable[[~.DiagnoseClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "diagnose_cluster" not in self._stubs: + self._stubs["diagnose_cluster"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.ClusterController/DiagnoseCluster", + request_serializer=clusters.DiagnoseClusterRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["diagnose_cluster"] + + +__all__ = ("ClusterControllerGrpcTransport",) diff --git a/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py b/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py new file mode 100644 index 00000000..e8e49b6e --- /dev/null +++ b/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py @@ -0,0 +1,395 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.dataproc_v1.types import clusters +from google.longrunning import operations_pb2 as operations # type: ignore + +from .base import ClusterControllerTransport +from .grpc import ClusterControllerGrpcTransport + + +class ClusterControllerGrpcAsyncIOTransport(ClusterControllerTransport): + """gRPC AsyncIO backend transport for ClusterController. + + The ClusterControllerService provides methods to manage + clusters of Compute Engine instances. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + address (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. 
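Both the sync and async transports accept ``api_mtls_endpoint`` and ``client_cert_source`` as documented above. A minimal sketch of wiring in a client-certificate callback; the mTLS endpoint and certificate paths are placeholders::

    from google.cloud import dataproc_v1
    from google.cloud.dataproc_v1.services.cluster_controller.transports import (
        ClusterControllerGrpcTransport,
    )

    def client_cert_source():
        # Must return (certificate_chain, private_key), both as PEM bytes.
        with open("client_cert.pem", "rb") as cert, open("client_key.pem", "rb") as key:
            return cert.read(), key.read()

    transport = ClusterControllerGrpcTransport(
        api_mtls_endpoint="dataproc.mtls.googleapis.com",
        client_cert_source=client_cert_source,
    )
    client = dataproc_v1.ClusterControllerClient(transport=transport)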
These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If + provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A + callback to provide client SSL certificate bytes and private key + bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` + is None. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. 
+ self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + self._stubs = {} + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Sanity check: Only create a new channel if we do not already + # have one. + if not hasattr(self, "_grpc_channel"): + self._grpc_channel = self.create_channel( + self._host, credentials=self._credentials, + ) + + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if "operations_client" not in self.__dict__: + self.__dict__["operations_client"] = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self.__dict__["operations_client"] + + @property + def create_cluster( + self, + ) -> Callable[[clusters.CreateClusterRequest], Awaitable[operations.Operation]]: + r"""Return a callable for the create cluster method over gRPC. + + Creates a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Returns: + Callable[[~.CreateClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_cluster" not in self._stubs: + self._stubs["create_cluster"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.ClusterController/CreateCluster", + request_serializer=clusters.CreateClusterRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_cluster"] + + @property + def update_cluster( + self, + ) -> Callable[[clusters.UpdateClusterRequest], Awaitable[operations.Operation]]: + r"""Return a callable for the update cluster method over gRPC. + + Updates a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Returns: + Callable[[~.UpdateClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "update_cluster" not in self._stubs: + self._stubs["update_cluster"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.ClusterController/UpdateCluster", + request_serializer=clusters.UpdateClusterRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["update_cluster"] + + @property + def delete_cluster( + self, + ) -> Callable[[clusters.DeleteClusterRequest], Awaitable[operations.Operation]]: + r"""Return a callable for the delete cluster method over gRPC. + + Deletes a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Returns: + Callable[[~.DeleteClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_cluster" not in self._stubs: + self._stubs["delete_cluster"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.ClusterController/DeleteCluster", + request_serializer=clusters.DeleteClusterRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_cluster"] + + @property + def get_cluster( + self, + ) -> Callable[[clusters.GetClusterRequest], Awaitable[clusters.Cluster]]: + r"""Return a callable for the get cluster method over gRPC. + + Gets the resource representation for a cluster in a + project. + + Returns: + Callable[[~.GetClusterRequest], + Awaitable[~.Cluster]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_cluster" not in self._stubs: + self._stubs["get_cluster"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.ClusterController/GetCluster", + request_serializer=clusters.GetClusterRequest.serialize, + response_deserializer=clusters.Cluster.deserialize, + ) + return self._stubs["get_cluster"] + + @property + def list_clusters( + self, + ) -> Callable[ + [clusters.ListClustersRequest], Awaitable[clusters.ListClustersResponse] + ]: + r"""Return a callable for the list clusters method over gRPC. + + Lists all regions/{region}/clusters in a project + alphabetically. + + Returns: + Callable[[~.ListClustersRequest], + Awaitable[~.ListClustersResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_clusters" not in self._stubs: + self._stubs["list_clusters"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.ClusterController/ListClusters", + request_serializer=clusters.ListClustersRequest.serialize, + response_deserializer=clusters.ListClustersResponse.deserialize, + ) + return self._stubs["list_clusters"] + + @property + def diagnose_cluster( + self, + ) -> Callable[[clusters.DiagnoseClusterRequest], Awaitable[operations.Operation]]: + r"""Return a callable for the diagnose cluster method over gRPC. + + Gets cluster diagnostic information. 
The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + After the operation completes, + [Operation.response][google.longrunning.Operation.response] + contains + `DiagnoseClusterResults `__. + + Returns: + Callable[[~.DiagnoseClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "diagnose_cluster" not in self._stubs: + self._stubs["diagnose_cluster"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.ClusterController/DiagnoseCluster", + request_serializer=clusters.DiagnoseClusterRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["diagnose_cluster"] + + +__all__ = ("ClusterControllerGrpcAsyncIOTransport",) diff --git a/google/__init__.py b/google/cloud/dataproc_v1/services/job_controller/__init__.py similarity index 71% rename from google/__init__.py rename to google/cloud/dataproc_v1/services/job_controller/__init__.py index 9a1b64a6..5bb83207 100644 --- a/google/__init__.py +++ b/google/cloud/dataproc_v1/services/job_controller/__init__.py @@ -1,24 +1,24 @@ # -*- coding: utf-8 -*- -# + # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# https://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# -try: - import pkg_resources - - pkg_resources.declare_namespace(__name__) -except ImportError: - import pkgutil +from .client import JobControllerClient +from .async_client import JobControllerAsyncClient - __path__ = pkgutil.extend_path(__path__, __name__) +__all__ = ( + "JobControllerClient", + "JobControllerAsyncClient", +) diff --git a/google/cloud/dataproc_v1/services/job_controller/async_client.py b/google/cloud/dataproc_v1/services/job_controller/async_client.py new file mode 100644 index 00000000..ed4b2e02 --- /dev/null +++ b/google/cloud/dataproc_v1/services/job_controller/async_client.py @@ -0,0 +1,722 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
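The ``job_controller`` package ``__init__`` above re-exports both client classes, so either import style below works; the top-level re-export is an assumption based on the package ``__init__`` files added elsewhere in this change::

    # Straight from the generated service package shown in this diff:
    from google.cloud.dataproc_v1.services.job_controller import (
        JobControllerClient,
        JobControllerAsyncClient,
    )

    # Or through the versioned package surface, assuming it re-exports the clients:
    from google.cloud import dataproc_v1

    client = dataproc_v1.JobControllerClient()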
+# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation +from google.api_core import operation_async +from google.cloud.dataproc_v1.services.job_controller import pagers +from google.cloud.dataproc_v1.types import jobs + +from .transports.base import JobControllerTransport +from .transports.grpc_asyncio import JobControllerGrpcAsyncIOTransport +from .client import JobControllerClient + + +class JobControllerAsyncClient: + """The JobController provides methods to manage jobs.""" + + _client: JobControllerClient + + DEFAULT_ENDPOINT = JobControllerClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = JobControllerClient.DEFAULT_MTLS_ENDPOINT + + from_service_account_file = JobControllerClient.from_service_account_file + from_service_account_json = from_service_account_file + + get_transport_class = functools.partial( + type(JobControllerClient).get_transport_class, type(JobControllerClient) + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, JobControllerTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + ) -> None: + """Instantiate the job controller client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.JobControllerTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint, this is the default value for + the environment variable) and "auto" (auto switch to the default + mTLS endpoint if client SSL credentials is present). However, + the ``api_endpoint`` property takes precedence if provided. + (2) The ``client_cert_source`` property is used to provide client + SSL credentials for mutual TLS transport. If not provided, the + default SSL credentials will be used if present. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + + self._client = JobControllerClient( + credentials=credentials, transport=transport, client_options=client_options, + ) + + async def submit_job( + self, + request: jobs.SubmitJobRequest = None, + *, + project_id: str = None, + region: str = None, + job: jobs.Job = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Submits a job to a cluster. + + Args: + request (:class:`~.jobs.SubmitJobRequest`): + The request object. 
A request to submit a job. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job (:class:`~.jobs.Job`): + Required. The job resource. + This corresponds to the ``job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.jobs.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, region, job]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = jobs.SubmitJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job is not None: + request.job = job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.submit_job, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=900.0, + client_info=_client_info, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def submit_job_as_operation( + self, + request: jobs.SubmitJobRequest = None, + *, + project_id: str = None, + region: str = None, + job: jobs.Job = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Submits job to a cluster. + + Args: + request (:class:`~.jobs.SubmitJobRequest`): + The request object. A request to submit a job. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job (:class:`~.jobs.Job`): + Required. The job resource. + This corresponds to the ``job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
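A sketch of the flattened-argument call that submit_job above supports. The Job field names used here (placement.cluster_name, pyspark_job.main_python_file_uri) describe the Dataproc v1 Job message and are illustrative assumptions, as are the project, region, cluster and bucket values.

    import asyncio

    from google.cloud.dataproc_v1.services.job_controller import JobControllerAsyncClient
    from google.cloud.dataproc_v1.types import jobs


    async def submit_example() -> None:
        client = JobControllerAsyncClient()
        # Assumed proto field names; every value below is a placeholder.
        job = jobs.Job(
            {
                "placement": {"cluster_name": "my-cluster"},
                "pyspark_job": {"main_python_file_uri": "gs://my-bucket/job.py"},
            }
        )
        # Pass either a SubmitJobRequest or the flattened fields, not both;
        # mixing them raises ValueError, as enforced in the method body above.
        submitted = await client.submit_job(
            project_id="my-project", region="us-central1", job=job,
        )
        print(submitted)


    asyncio.run(submit_example())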
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.jobs.Job``: A Dataproc job resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, region, job]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = jobs.SubmitJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job is not None: + request.job = job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.submit_job_as_operation, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=900.0, + client_info=_client_info, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + jobs.Job, + metadata_type=jobs.JobMetadata, + ) + + # Done; return the response. + return response + + async def get_job( + self, + request: jobs.GetJobRequest = None, + *, + project_id: str = None, + region: str = None, + job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Gets the resource representation for a job in a + project. + + Args: + request (:class:`~.jobs.GetJobRequest`): + The request object. A request to get the resource + representation for a job in a project. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job_id (:class:`str`): + Required. The job ID. + This corresponds to the ``job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.jobs.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, region, job_id]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = jobs.GetJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job_id is not None: + request.job_id = job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_job, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded, + ), + ), + default_timeout=900.0, + client_info=_client_info, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_jobs( + self, + request: jobs.ListJobsRequest = None, + *, + project_id: str = None, + region: str = None, + filter: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListJobsAsyncPager: + r"""Lists regions/{region}/jobs in a project. + + Args: + request (:class:`~.jobs.ListJobsRequest`): + The request object. A request to list jobs in a project. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + filter (:class:`str`): + Optional. A filter constraining the jobs to list. + Filters are case-sensitive and have the following + syntax: + + [field = value] AND [field [= value]] ... + + where **field** is ``status.state`` or ``labels.[KEY]``, + and ``[KEY]`` is a label key. **value** can be ``*`` to + match all values. ``status.state`` can be either + ``ACTIVE`` or ``NON_ACTIVE``. Only the logical ``AND`` + operator is supported; space-separated items are treated + as having an implicit ``AND`` operator. + + Example filter: + + status.state = ACTIVE AND labels.env = staging AND + labels.starred = \* + This corresponds to the ``filter`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListJobsAsyncPager: + A list of jobs in a project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, region, filter]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = jobs.ListJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if filter is not None: + request.filter = filter + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_jobs, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded, + ), + ), + default_timeout=900.0, + client_info=_client_info, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListJobsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_job( + self, + request: jobs.UpdateJobRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Updates a job in a project. + + Args: + request (:class:`~.jobs.UpdateJobRequest`): + The request object. A request to update a job. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.jobs.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + + request = jobs.UpdateJobRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_job, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=900.0, + client_info=_client_info, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def cancel_job( + self, + request: jobs.CancelJobRequest = None, + *, + project_id: str = None, + region: str = None, + job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Starts a job cancellation request. To access the job resource + after cancellation, call + `regions/{region}/jobs.list `__ + or + `regions/{region}/jobs.get `__. + + Args: + request (:class:`~.jobs.CancelJobRequest`): + The request object. A request to cancel a job. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job_id (:class:`str`): + Required. The job ID. + This corresponds to the ``job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
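A sketch of paging through list_jobs with the filter syntax documented above; the project, region and label values are placeholders, and the filter string simply reuses the docstring's example.

    import asyncio

    from google.cloud.dataproc_v1.services.job_controller import JobControllerAsyncClient


    async def list_active_jobs() -> None:
        client = JobControllerAsyncClient()
        pager = await client.list_jobs(
            project_id="my-project",   # placeholder
            region="us-central1",      # placeholder
            filter="status.state = ACTIVE AND labels.env = staging",
        )
        # The async pager resolves additional pages transparently.
        async for job in pager:
            print(job)


    asyncio.run(list_active_jobs())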
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.jobs.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, region, job_id]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = jobs.CancelJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job_id is not None: + request.job_id = job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_job, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded, + ), + ), + default_timeout=900.0, + client_info=_client_info, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_job( + self, + request: jobs.DeleteJobRequest = None, + *, + project_id: str = None, + region: str = None, + job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes the job from the project. If the job is active, the + delete fails, and the response returns ``FAILED_PRECONDITION``. + + Args: + request (:class:`~.jobs.DeleteJobRequest`): + The request object. A request to delete a job. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job_id (:class:`str`): + Required. The job ID. + This corresponds to the ``job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, region, job_id]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = jobs.DeleteJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job_id is not None: + request.job_id = job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_job, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=900.0, + client_info=_client_info, + ) + + # Send the request. + await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + +try: + _client_info = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-dataproc",).version, + ) +except pkg_resources.DistributionNotFound: + _client_info = gapic_v1.client_info.ClientInfo() + + +__all__ = ("JobControllerAsyncClient",) diff --git a/google/cloud/dataproc_v1/services/job_controller/client.py b/google/cloud/dataproc_v1/services/job_controller/client.py new file mode 100644 index 00000000..157d913e --- /dev/null +++ b/google/cloud/dataproc_v1/services/job_controller/client.py @@ -0,0 +1,805 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +import os +import re +from typing import Callable, Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation +from google.api_core import operation_async +from google.cloud.dataproc_v1.services.job_controller import pagers +from google.cloud.dataproc_v1.types import jobs + +from .transports.base import JobControllerTransport +from .transports.grpc import JobControllerGrpcTransport +from .transports.grpc_asyncio import JobControllerGrpcAsyncIOTransport + + +class JobControllerClientMeta(type): + """Metaclass for the JobController client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = OrderedDict() # type: Dict[str, Type[JobControllerTransport]] + _transport_registry["grpc"] = JobControllerGrpcTransport + _transport_registry["grpc_asyncio"] = JobControllerGrpcAsyncIOTransport + + def get_transport_class(cls, label: str = None,) -> Type[JobControllerTransport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. 
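Since delete_job above documents that deleting an active job fails with FAILED_PRECONDITION, here is a sketch of handling that case; the exception class is the standard one from google.api_core, and the identifiers are placeholders.

    import asyncio

    from google.api_core import exceptions
    from google.cloud.dataproc_v1.services.job_controller import JobControllerAsyncClient


    async def delete_if_done(job_id: str) -> None:
        client = JobControllerAsyncClient()
        try:
            await client.delete_job(
                project_id="my-project", region="us-central1", job_id=job_id,
            )
        except exceptions.FailedPrecondition:
            # The job is still active; cancel it first (or wait) before deleting.
            await client.cancel_job(
                project_id="my-project", region="us-central1", job_id=job_id,
            )


    asyncio.run(delete_if_done("job-1234"))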
If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class JobControllerClient(metaclass=JobControllerClientMeta): + """The JobController provides methods to manage jobs.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Convert api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "dataproc.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + {@api.name}: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, JobControllerTransport] = None, + client_options: ClientOptions = None, + ) -> None: + """Instantiate the job controller client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.JobControllerTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint, this is the default value for + the environment variable) and "auto" (auto switch to the default + mTLS endpoint if client SSL credentials is present). However, + the ``api_endpoint`` property takes precedence if provided. + (2) The ``client_cert_source`` property is used to provide client + SSL credentials for mutual TLS transport. 
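A sketch of the class-level helpers shown above; the key-file path is a placeholder, and the endpoint value restates what _get_default_mtls_endpoint derives from DEFAULT_ENDPOINT.

    from google.cloud.dataproc_v1.services.job_controller import JobControllerClient

    # Construct a client from a service-account key file (placeholder path).
    client = JobControllerClient.from_service_account_file("/path/to/key.json")

    # The metaclass-backed helper selects a transport implementation by name.
    grpc_transport_cls = JobControllerClient.get_transport_class("grpc")

    # The mTLS helper rewrites the default endpoint:
    #   "dataproc.googleapis.com" -> "dataproc.mtls.googleapis.com"
    assert JobControllerClient.DEFAULT_MTLS_ENDPOINT == "dataproc.mtls.googleapis.com"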
If not provided, the + default SSL credentials will be used if present. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = ClientOptions.from_dict(client_options) + if client_options is None: + client_options = ClientOptions.ClientOptions() + + if client_options.api_endpoint is None: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") + if use_mtls_env == "never": + client_options.api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + has_client_cert_source = ( + client_options.client_cert_source is not None + or mtls.has_default_client_cert_source() + ) + client_options.api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT + if has_client_cert_source + else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, JobControllerTransport): + # transport is a JobControllerTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=client_options.api_endpoint, + scopes=client_options.scopes, + api_mtls_endpoint=client_options.api_endpoint, + client_cert_source=client_options.client_cert_source, + quota_project_id=client_options.quota_project_id, + ) + + def submit_job( + self, + request: jobs.SubmitJobRequest = None, + *, + project_id: str = None, + region: str = None, + job: jobs.Job = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Submits a job to a cluster. + + Args: + request (:class:`~.jobs.SubmitJobRequest`): + The request object. A request to submit a job. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job (:class:`~.jobs.Job`): + Required. The job resource. + This corresponds to the ``job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.jobs.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. 
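A sketch of the endpoint-resolution behaviour implemented in __init__ above: an explicit client_options.api_endpoint takes precedence, otherwise GOOGLE_API_USE_MTLS chooses between the regular and mTLS defaults. The regional endpoint below is a placeholder, not something this patch defines.

    import os

    from google.api_core.client_options import ClientOptions
    from google.cloud.dataproc_v1.services.job_controller import JobControllerClient

    # Explicit endpoint override (placeholder regional endpoint); this wins
    # over the GOOGLE_API_USE_MTLS environment variable.
    client = JobControllerClient(
        client_options=ClientOptions(api_endpoint="us-central1-dataproc.googleapis.com"),
    )

    # Without an explicit endpoint, "never" / "auto" / "always" pick between
    # DEFAULT_ENDPOINT and DEFAULT_MTLS_ENDPOINT, as documented above.
    os.environ["GOOGLE_API_USE_MTLS"] = "never"
    default_client = JobControllerClient()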
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, job]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a jobs.SubmitJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, jobs.SubmitJobRequest): + request = jobs.SubmitJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job is not None: + request.job = job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.submit_job] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def submit_job_as_operation( + self, + request: jobs.SubmitJobRequest = None, + *, + project_id: str = None, + region: str = None, + job: jobs.Job = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Submits job to a cluster. + + Args: + request (:class:`~.jobs.SubmitJobRequest`): + The request object. A request to submit a job. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job (:class:`~.jobs.Job`): + Required. The job resource. + This corresponds to the ``job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.jobs.Job``: A Dataproc job resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, job]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a jobs.SubmitJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, jobs.SubmitJobRequest): + request = jobs.SubmitJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job is not None: + request.job = job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.submit_job_as_operation] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + jobs.Job, + metadata_type=jobs.JobMetadata, + ) + + # Done; return the response. + return response + + def get_job( + self, + request: jobs.GetJobRequest = None, + *, + project_id: str = None, + region: str = None, + job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Gets the resource representation for a job in a + project. + + Args: + request (:class:`~.jobs.GetJobRequest`): + The request object. A request to get the resource + representation for a job in a project. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job_id (:class:`str`): + Required. The job ID. + This corresponds to the ``job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.jobs.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, job_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a jobs.GetJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, jobs.GetJobRequest): + request = jobs.GetJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job_id is not None: + request.job_id = job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_job] + + # Send the request. 
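A sketch of the long-running-operation flow wired up in submit_job_as_operation above: the call returns a google.api_core.operation.Operation whose result() blocks until the job reaches a terminal state. The Job field names and all literals are illustrative assumptions.

    from google.cloud.dataproc_v1.services.job_controller import JobControllerClient
    from google.cloud.dataproc_v1.types import jobs

    client = JobControllerClient()
    job = jobs.Job(
        {
            "placement": {"cluster_name": "my-cluster"},                      # assumed field names
            "pyspark_job": {"main_python_file_uri": "gs://my-bucket/job.py"},
        }
    )

    operation = client.submit_job_as_operation(
        project_id="my-project", region="us-central1", job=job,
    )

    # Block until completion; the unpacked result is a jobs.Job, and the
    # operation metadata is a jobs.JobMetadata message, as wired up above.
    finished_job = operation.result(timeout=600)
    print(finished_job)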
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_jobs( + self, + request: jobs.ListJobsRequest = None, + *, + project_id: str = None, + region: str = None, + filter: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListJobsPager: + r"""Lists regions/{region}/jobs in a project. + + Args: + request (:class:`~.jobs.ListJobsRequest`): + The request object. A request to list jobs in a project. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + filter (:class:`str`): + Optional. A filter constraining the jobs to list. + Filters are case-sensitive and have the following + syntax: + + [field = value] AND [field [= value]] ... + + where **field** is ``status.state`` or ``labels.[KEY]``, + and ``[KEY]`` is a label key. **value** can be ``*`` to + match all values. ``status.state`` can be either + ``ACTIVE`` or ``NON_ACTIVE``. Only the logical ``AND`` + operator is supported; space-separated items are treated + as having an implicit ``AND`` operator. + + Example filter: + + status.state = ACTIVE AND labels.env = staging AND + labels.starred = \* + This corresponds to the ``filter`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListJobsPager: + A list of jobs in a project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, filter]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a jobs.ListJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, jobs.ListJobsRequest): + request = jobs.ListJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if filter is not None: + request.filter = filter + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_jobs] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListJobsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def update_job( + self, + request: jobs.UpdateJobRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Updates a job in a project. + + Args: + request (:class:`~.jobs.UpdateJobRequest`): + The request object. A request to update a job. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.jobs.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + + # Minor optimization to avoid making a copy if the user passes + # in a jobs.UpdateJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, jobs.UpdateJobRequest): + request = jobs.UpdateJobRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_job] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def cancel_job( + self, + request: jobs.CancelJobRequest = None, + *, + project_id: str = None, + region: str = None, + job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Starts a job cancellation request. To access the job resource + after cancellation, call + `regions/{region}/jobs.list `__ + or + `regions/{region}/jobs.get `__. + + Args: + request (:class:`~.jobs.CancelJobRequest`): + The request object. A request to cancel a job. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job_id (:class:`str`): + Required. The job ID. + This corresponds to the ``job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.jobs.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, job_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a jobs.CancelJobRequest. 
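A sketch of page-level iteration with the synchronous pager returned by list_jobs above; flattened iteration (for job in pager) is also available, and the values are placeholders.

    from google.cloud.dataproc_v1.services.job_controller import JobControllerClient

    client = JobControllerClient()
    pager = client.list_jobs(project_id="my-project", region="us-central1")

    # Walk the results page by page; each page is a jobs.ListJobsResponse, and
    # the pager fetches the next page whenever next_page_token is set.
    for page in pager.pages:
        for job in page.jobs:
            print(job)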
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, jobs.CancelJobRequest): + request = jobs.CancelJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job_id is not None: + request.job_id = job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_job] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_job( + self, + request: jobs.DeleteJobRequest = None, + *, + project_id: str = None, + region: str = None, + job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes the job from the project. If the job is active, the + delete fails, and the response returns ``FAILED_PRECONDITION``. + + Args: + request (:class:`~.jobs.DeleteJobRequest`): + The request object. A request to delete a job. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job_id (:class:`str`): + Required. The job ID. + This corresponds to the ``job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, job_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a jobs.DeleteJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, jobs.DeleteJobRequest): + request = jobs.DeleteJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job_id is not None: + request.job_id = job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_job] + + # Send the request. 
+ rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + +try: + _client_info = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-dataproc",).version, + ) +except pkg_resources.DistributionNotFound: + _client_info = gapic_v1.client_info.ClientInfo() + + +__all__ = ("JobControllerClient",) diff --git a/google/cloud/dataproc_v1/services/job_controller/pagers.py b/google/cloud/dataproc_v1/services/job_controller/pagers.py new file mode 100644 index 00000000..185f0ace --- /dev/null +++ b/google/cloud/dataproc_v1/services/job_controller/pagers.py @@ -0,0 +1,148 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple + +from google.cloud.dataproc_v1.types import jobs + + +class ListJobsPager: + """A pager for iterating through ``list_jobs`` requests. + + This class thinly wraps an initial + :class:`~.jobs.ListJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``jobs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListJobs`` requests and continue to iterate + through the ``jobs`` field on the + corresponding responses. + + All the usual :class:`~.jobs.ListJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., jobs.ListJobsResponse], + request: jobs.ListJobsRequest, + response: jobs.ListJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.jobs.ListJobsRequest`): + The initial request object. + response (:class:`~.jobs.ListJobsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = jobs.ListJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[jobs.ListJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[jobs.Job]: + for page in self.pages: + yield from page.jobs + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListJobsAsyncPager: + """A pager for iterating through ``list_jobs`` requests. 
+ + This class thinly wraps an initial + :class:`~.jobs.ListJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``jobs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListJobs`` requests and continue to iterate + through the ``jobs`` field on the + corresponding responses. + + All the usual :class:`~.jobs.ListJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[jobs.ListJobsResponse]], + request: jobs.ListJobsRequest, + response: jobs.ListJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.jobs.ListJobsRequest`): + The initial request object. + response (:class:`~.jobs.ListJobsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = jobs.ListJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[jobs.ListJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[jobs.Job]: + async def async_generator(): + async for page in self.pages: + for response in page.jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/dataproc_v1/services/job_controller/transports/__init__.py b/google/cloud/dataproc_v1/services/job_controller/transports/__init__.py new file mode 100644 index 00000000..a3d68663 --- /dev/null +++ b/google/cloud/dataproc_v1/services/job_controller/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import JobControllerTransport +from .grpc import JobControllerGrpcTransport +from .grpc_asyncio import JobControllerGrpcAsyncIOTransport + + +# Compile a registry of transports. 
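A sketch of two details of the pagers defined above: attribute access is proxied to the most recent ListJobsResponse, and the async pager exposes a page stream alongside flattened iteration. Values are placeholders.

    import asyncio

    from google.cloud.dataproc_v1.services.job_controller import JobControllerAsyncClient


    async def walk_pages() -> None:
        client = JobControllerAsyncClient()
        pager = await client.list_jobs(project_id="my-project", region="us-central1")

        # __getattr__ forwards unknown attributes to the underlying response, so
        # fields such as next_page_token are readable on the pager itself.
        print(pager.next_page_token)

        # Page-level iteration; `async for job in pager` would flatten instead.
        async for page in pager.pages:
            print(len(page.jobs))


    asyncio.run(walk_pages())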
+_transport_registry = OrderedDict() # type: Dict[str, Type[JobControllerTransport]] +_transport_registry["grpc"] = JobControllerGrpcTransport +_transport_registry["grpc_asyncio"] = JobControllerGrpcAsyncIOTransport + + +__all__ = ( + "JobControllerTransport", + "JobControllerGrpcTransport", + "JobControllerGrpcAsyncIOTransport", +) diff --git a/google/cloud/dataproc_v1/services/job_controller/transports/base.py b/google/cloud/dataproc_v1/services/job_controller/transports/base.py new file mode 100644 index 00000000..d4200ffd --- /dev/null +++ b/google/cloud/dataproc_v1/services/job_controller/transports/base.py @@ -0,0 +1,260 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import abc +import typing +import pkg_resources + +from google import auth +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.dataproc_v1.types import jobs +from google.longrunning import operations_pb2 as operations # type: ignore +from google.protobuf import empty_pb2 as empty # type: ignore + + +try: + _client_info = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-dataproc",).version, + ) +except pkg_resources.DistributionNotFound: + _client_info = gapic_v1.client_info.ClientInfo() + + +class JobControllerTransport(abc.ABC): + """Abstract transport class for JobController.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + def __init__( + self, + *, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scope (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + # If no credentials are provided, then determine the appropriate + # defaults. 
+ if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=scopes, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = auth.default( + scopes=scopes, quota_project_id=quota_project_id + ) + + # Save the credentials. + self._credentials = credentials + + # Lifted into its own function so it can be stubbed out during tests. + self._prep_wrapped_messages() + + def _prep_wrapped_messages(self): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.submit_job: gapic_v1.method.wrap_method( + self.submit_job, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=900.0, + client_info=_client_info, + ), + self.submit_job_as_operation: gapic_v1.method.wrap_method( + self.submit_job_as_operation, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=900.0, + client_info=_client_info, + ), + self.get_job: gapic_v1.method.wrap_method( + self.get_job, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded, + ), + ), + default_timeout=900.0, + client_info=_client_info, + ), + self.list_jobs: gapic_v1.method.wrap_method( + self.list_jobs, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded, + ), + ), + default_timeout=900.0, + client_info=_client_info, + ), + self.update_job: gapic_v1.method.wrap_method( + self.update_job, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=900.0, + client_info=_client_info, + ), + self.cancel_job: gapic_v1.method.wrap_method( + self.cancel_job, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded, + ), + ), + default_timeout=900.0, + client_info=_client_info, + ), + self.delete_job: gapic_v1.method.wrap_method( + self.delete_job, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=900.0, + client_info=_client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def submit_job( + self, + ) -> typing.Callable[ + [jobs.SubmitJobRequest], typing.Union[jobs.Job, typing.Awaitable[jobs.Job]] + ]: + raise NotImplementedError() + + @property + def submit_job_as_operation( + self, + ) -> typing.Callable[ + [jobs.SubmitJobRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def get_job( + self, + ) -> typing.Callable[ + 
[jobs.GetJobRequest], typing.Union[jobs.Job, typing.Awaitable[jobs.Job]] + ]: + raise NotImplementedError() + + @property + def list_jobs( + self, + ) -> typing.Callable[ + [jobs.ListJobsRequest], + typing.Union[jobs.ListJobsResponse, typing.Awaitable[jobs.ListJobsResponse]], + ]: + raise NotImplementedError() + + @property + def update_job( + self, + ) -> typing.Callable[ + [jobs.UpdateJobRequest], typing.Union[jobs.Job, typing.Awaitable[jobs.Job]] + ]: + raise NotImplementedError() + + @property + def cancel_job( + self, + ) -> typing.Callable[ + [jobs.CancelJobRequest], typing.Union[jobs.Job, typing.Awaitable[jobs.Job]] + ]: + raise NotImplementedError() + + @property + def delete_job( + self, + ) -> typing.Callable[ + [jobs.DeleteJobRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + +__all__ = ("JobControllerTransport",) diff --git a/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py b/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py new file mode 100644 index 00000000..6174c16b --- /dev/null +++ b/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py @@ -0,0 +1,402 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + + +import grpc # type: ignore + +from google.cloud.dataproc_v1.types import jobs +from google.longrunning import operations_pb2 as operations # type: ignore +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import JobControllerTransport + + +class JobControllerGrpcTransport(JobControllerTransport): + """gRPC backend transport for JobController. + + The JobController provides methods to manage jobs. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If + provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A + callback to provide client SSL certificate bytes and private key + bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` + is None. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + self._stubs = {} # type: Dict[str, Callable] + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + @classmethod + def create_channel( + cls, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + address (Optionsl[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. 
+ This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Sanity check: Only create a new channel if we do not already + # have one. + if not hasattr(self, "_grpc_channel"): + self._grpc_channel = self.create_channel( + self._host, credentials=self._credentials, + ) + + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if "operations_client" not in self.__dict__: + self.__dict__["operations_client"] = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self.__dict__["operations_client"] + + @property + def submit_job(self) -> Callable[[jobs.SubmitJobRequest], jobs.Job]: + r"""Return a callable for the submit job method over gRPC. + + Submits a job to a cluster. + + Returns: + Callable[[~.SubmitJobRequest], + ~.Job]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "submit_job" not in self._stubs: + self._stubs["submit_job"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.JobController/SubmitJob", + request_serializer=jobs.SubmitJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs["submit_job"] + + @property + def submit_job_as_operation( + self, + ) -> Callable[[jobs.SubmitJobRequest], operations.Operation]: + r"""Return a callable for the submit job as operation method over gRPC. + + Submits job to a cluster. + + Returns: + Callable[[~.SubmitJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "submit_job_as_operation" not in self._stubs: + self._stubs["submit_job_as_operation"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.JobController/SubmitJobAsOperation", + request_serializer=jobs.SubmitJobRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["submit_job_as_operation"] + + @property + def get_job(self) -> Callable[[jobs.GetJobRequest], jobs.Job]: + r"""Return a callable for the get job method over gRPC. + + Gets the resource representation for a job in a + project. + + Returns: + Callable[[~.GetJobRequest], + ~.Job]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_job" not in self._stubs: + self._stubs["get_job"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.JobController/GetJob", + request_serializer=jobs.GetJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs["get_job"] + + @property + def list_jobs(self) -> Callable[[jobs.ListJobsRequest], jobs.ListJobsResponse]: + r"""Return a callable for the list jobs method over gRPC. + + Lists regions/{region}/jobs in a project. + + Returns: + Callable[[~.ListJobsRequest], + ~.ListJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_jobs" not in self._stubs: + self._stubs["list_jobs"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.JobController/ListJobs", + request_serializer=jobs.ListJobsRequest.serialize, + response_deserializer=jobs.ListJobsResponse.deserialize, + ) + return self._stubs["list_jobs"] + + @property + def update_job(self) -> Callable[[jobs.UpdateJobRequest], jobs.Job]: + r"""Return a callable for the update job method over gRPC. + + Updates a job in a project. + + Returns: + Callable[[~.UpdateJobRequest], + ~.Job]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_job" not in self._stubs: + self._stubs["update_job"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.JobController/UpdateJob", + request_serializer=jobs.UpdateJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs["update_job"] + + @property + def cancel_job(self) -> Callable[[jobs.CancelJobRequest], jobs.Job]: + r"""Return a callable for the cancel job method over gRPC. + + Starts a job cancellation request. To access the job resource + after cancellation, call + `regions/{region}/jobs.list `__ + or + `regions/{region}/jobs.get `__. + + Returns: + Callable[[~.CancelJobRequest], + ~.Job]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "cancel_job" not in self._stubs: + self._stubs["cancel_job"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.JobController/CancelJob", + request_serializer=jobs.CancelJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs["cancel_job"] + + @property + def delete_job(self) -> Callable[[jobs.DeleteJobRequest], empty.Empty]: + r"""Return a callable for the delete job method over gRPC. + + Deletes the job from the project. If the job is active, the + delete fails, and the response returns ``FAILED_PRECONDITION``. + + Returns: + Callable[[~.DeleteJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_job" not in self._stubs: + self._stubs["delete_job"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.JobController/DeleteJob", + request_serializer=jobs.DeleteJobRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_job"] + + +__all__ = ("JobControllerGrpcTransport",) diff --git a/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py b/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py new file mode 100644 index 00000000..999141bf --- /dev/null +++ b/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py @@ -0,0 +1,397 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.dataproc_v1.types import jobs +from google.longrunning import operations_pb2 as operations # type: ignore +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import JobControllerTransport +from .grpc import JobControllerGrpcTransport + + +class JobControllerGrpcAsyncIOTransport(JobControllerTransport): + """gRPC AsyncIO backend transport for JobController. + + The JobController provides methods to manage jobs. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + address (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If + provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A + callback to provide client SSL certificate bytes and private key + bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` + is None. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. 
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + self._stubs = {} + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Sanity check: Only create a new channel if we do not already + # have one. + if not hasattr(self, "_grpc_channel"): + self._grpc_channel = self.create_channel( + self._host, credentials=self._credentials, + ) + + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if "operations_client" not in self.__dict__: + self.__dict__["operations_client"] = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self.__dict__["operations_client"] + + @property + def submit_job(self) -> Callable[[jobs.SubmitJobRequest], Awaitable[jobs.Job]]: + r"""Return a callable for the submit job method over gRPC. + + Submits a job to a cluster. + + Returns: + Callable[[~.SubmitJobRequest], + Awaitable[~.Job]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "submit_job" not in self._stubs: + self._stubs["submit_job"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.JobController/SubmitJob", + request_serializer=jobs.SubmitJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs["submit_job"] + + @property + def submit_job_as_operation( + self, + ) -> Callable[[jobs.SubmitJobRequest], Awaitable[operations.Operation]]: + r"""Return a callable for the submit job as operation method over gRPC. + + Submits job to a cluster. + + Returns: + Callable[[~.SubmitJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "submit_job_as_operation" not in self._stubs: + self._stubs["submit_job_as_operation"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.JobController/SubmitJobAsOperation", + request_serializer=jobs.SubmitJobRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["submit_job_as_operation"] + + @property + def get_job(self) -> Callable[[jobs.GetJobRequest], Awaitable[jobs.Job]]: + r"""Return a callable for the get job method over gRPC. + + Gets the resource representation for a job in a + project. + + Returns: + Callable[[~.GetJobRequest], + Awaitable[~.Job]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_job" not in self._stubs: + self._stubs["get_job"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.JobController/GetJob", + request_serializer=jobs.GetJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs["get_job"] + + @property + def list_jobs( + self, + ) -> Callable[[jobs.ListJobsRequest], Awaitable[jobs.ListJobsResponse]]: + r"""Return a callable for the list jobs method over gRPC. + + Lists regions/{region}/jobs in a project. + + Returns: + Callable[[~.ListJobsRequest], + Awaitable[~.ListJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_jobs" not in self._stubs: + self._stubs["list_jobs"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.JobController/ListJobs", + request_serializer=jobs.ListJobsRequest.serialize, + response_deserializer=jobs.ListJobsResponse.deserialize, + ) + return self._stubs["list_jobs"] + + @property + def update_job(self) -> Callable[[jobs.UpdateJobRequest], Awaitable[jobs.Job]]: + r"""Return a callable for the update job method over gRPC. + + Updates a job in a project. + + Returns: + Callable[[~.UpdateJobRequest], + Awaitable[~.Job]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_job" not in self._stubs: + self._stubs["update_job"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.JobController/UpdateJob", + request_serializer=jobs.UpdateJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs["update_job"] + + @property + def cancel_job(self) -> Callable[[jobs.CancelJobRequest], Awaitable[jobs.Job]]: + r"""Return a callable for the cancel job method over gRPC. + + Starts a job cancellation request. To access the job resource + after cancellation, call + `regions/{region}/jobs.list `__ + or + `regions/{region}/jobs.get `__. + + Returns: + Callable[[~.CancelJobRequest], + Awaitable[~.Job]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_job" not in self._stubs: + self._stubs["cancel_job"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.JobController/CancelJob", + request_serializer=jobs.CancelJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs["cancel_job"] + + @property + def delete_job(self) -> Callable[[jobs.DeleteJobRequest], Awaitable[empty.Empty]]: + r"""Return a callable for the delete job method over gRPC. + + Deletes the job from the project. If the job is active, the + delete fails, and the response returns ``FAILED_PRECONDITION``. + + Returns: + Callable[[~.DeleteJobRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_job" not in self._stubs: + self._stubs["delete_job"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.JobController/DeleteJob", + request_serializer=jobs.DeleteJobRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_job"] + + +__all__ = ("JobControllerGrpcAsyncIOTransport",) diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/__init__.py b/google/cloud/dataproc_v1/services/workflow_template_service/__init__.py new file mode 100644 index 00000000..8e92d92d --- /dev/null +++ b/google/cloud/dataproc_v1/services/workflow_template_service/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .client import WorkflowTemplateServiceClient +from .async_client import WorkflowTemplateServiceAsyncClient + +__all__ = ( + "WorkflowTemplateServiceClient", + "WorkflowTemplateServiceAsyncClient", +) diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/async_client.py b/google/cloud/dataproc_v1/services/workflow_template_service/async_client.py new file mode 100644 index 00000000..dc5c0f22 --- /dev/null +++ b/google/cloud/dataproc_v1/services/workflow_template_service/async_client.py @@ -0,0 +1,865 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation +from google.api_core import operation_async +from google.cloud.dataproc_v1.services.workflow_template_service import pagers +from google.cloud.dataproc_v1.types import workflow_templates +from google.protobuf import empty_pb2 as empty # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + +from .transports.base import WorkflowTemplateServiceTransport +from .transports.grpc_asyncio import WorkflowTemplateServiceGrpcAsyncIOTransport +from .client import WorkflowTemplateServiceClient + + +class WorkflowTemplateServiceAsyncClient: + """The API interface for managing Workflow Templates in the + Dataproc API. + """ + + _client: WorkflowTemplateServiceClient + + DEFAULT_ENDPOINT = WorkflowTemplateServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = WorkflowTemplateServiceClient.DEFAULT_MTLS_ENDPOINT + + workflow_template_path = staticmethod( + WorkflowTemplateServiceClient.workflow_template_path + ) + + from_service_account_file = WorkflowTemplateServiceClient.from_service_account_file + from_service_account_json = from_service_account_file + + get_transport_class = functools.partial( + type(WorkflowTemplateServiceClient).get_transport_class, + type(WorkflowTemplateServiceClient), + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, WorkflowTemplateServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + ) -> None: + """Instantiate the workflow template service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.WorkflowTemplateServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint, this is the default value for + the environment variable) and "auto" (auto switch to the default + mTLS endpoint if client SSL credentials is present). However, + the ``api_endpoint`` property takes precedence if provided. + (2) The ``client_cert_source`` property is used to provide client + SSL credentials for mutual TLS transport. If not provided, the + default SSL credentials will be used if present. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + + self._client = WorkflowTemplateServiceClient( + credentials=credentials, transport=transport, client_options=client_options, + ) + + async def create_workflow_template( + self, + request: workflow_templates.CreateWorkflowTemplateRequest = None, + *, + parent: str = None, + template: workflow_templates.WorkflowTemplate = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> workflow_templates.WorkflowTemplate: + r"""Creates new workflow template. + + Args: + request (:class:`~.workflow_templates.CreateWorkflowTemplateRequest`): + The request object. A request to create a workflow + template. + parent (:class:`str`): + Required. The resource name of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates,create``, + the resource name of the region has the following + format: ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.workflowTemplates.create``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + template (:class:`~.workflow_templates.WorkflowTemplate`): + Required. The Dataproc workflow + template to create. + This corresponds to the ``template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.workflow_templates.WorkflowTemplate: + A Dataproc workflow template + resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent, template]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = workflow_templates.CreateWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if template is not None: + request.template = template + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_workflow_template, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=600.0, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + async def get_workflow_template( + self, + request: workflow_templates.GetWorkflowTemplateRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> workflow_templates.WorkflowTemplate: + r"""Retrieves the latest workflow template. + Can retrieve previously instantiated template by + specifying optional version parameter. + + Args: + request (:class:`~.workflow_templates.GetWorkflowTemplateRequest`): + The request object. A request to fetch a workflow + template. + name (:class:`str`): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates.get``, the + resource name of the template has the following + format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates.get``, the + resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.workflow_templates.WorkflowTemplate: + A Dataproc workflow template + resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = workflow_templates.GetWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_workflow_template, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def instantiate_workflow_template( + self, + request: workflow_templates.InstantiateWorkflowTemplateRequest = None, + *, + name: str = None, + parameters: Sequence[ + workflow_templates.InstantiateWorkflowTemplateRequest.ParametersEntry + ] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Instantiates a template and begins execution. 
+ + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. + + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Args: + request (:class:`~.workflow_templates.InstantiateWorkflowTemplateRequest`): + The request object. A request to instantiate a workflow + template. + name (:class:`str`): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For + ``projects.regions.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For + ``projects.locations.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parameters (:class:`Sequence[~.workflow_templates.InstantiateWorkflowTemplateRequest.ParametersEntry]`): + Optional. Map from parameter names to + values that should be used for those + parameters. Values may not exceed 100 + characters. + This corresponds to the ``parameters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name, parameters]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = workflow_templates.InstantiateWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if parameters is not None: + request.parameters = parameters + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.instantiate_workflow_template, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=600.0, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty.Empty, + metadata_type=workflow_templates.WorkflowMetadata, + ) + + # Done; return the response. + return response + + async def instantiate_inline_workflow_template( + self, + request: workflow_templates.InstantiateInlineWorkflowTemplateRequest = None, + *, + parent: str = None, + template: workflow_templates.WorkflowTemplate = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Instantiates a template and begins execution. + + This method is equivalent to executing the sequence + [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate], + [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate], + [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate]. + + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. + + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Args: + request (:class:`~.workflow_templates.InstantiateInlineWorkflowTemplateRequest`): + The request object. A request to instantiate an inline + workflow template. + parent (:class:`str`): + Required. The resource name of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For + ``projects.regions.workflowTemplates,instantiateinline``, + the resource name of the region has the following + format: ``projects/{project_id}/regions/{region}`` + + - For + ``projects.locations.workflowTemplates.instantiateinline``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + template (:class:`~.workflow_templates.WorkflowTemplate`): + Required. The workflow template to + instantiate. + This corresponds to the ``template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent, template]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = workflow_templates.InstantiateInlineWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if template is not None: + request.template = template + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.instantiate_inline_workflow_template, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=600.0, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty.Empty, + metadata_type=workflow_templates.WorkflowMetadata, + ) + + # Done; return the response. + return response + + async def update_workflow_template( + self, + request: workflow_templates.UpdateWorkflowTemplateRequest = None, + *, + template: workflow_templates.WorkflowTemplate = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> workflow_templates.WorkflowTemplate: + r"""Updates (replaces) workflow template. The updated + template must contain version that matches the current + server version. + + Args: + request (:class:`~.workflow_templates.UpdateWorkflowTemplateRequest`): + The request object. A request to update a workflow + template. + template (:class:`~.workflow_templates.WorkflowTemplate`): + Required. The updated workflow template. + + The ``template.version`` field must match the current + version. + This corresponds to the ``template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.workflow_templates.WorkflowTemplate: + A Dataproc workflow template + resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([template]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = workflow_templates.UpdateWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if template is not None: + request.template = template + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_workflow_template, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=600.0, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("template.name", request.template.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_workflow_templates( + self, + request: workflow_templates.ListWorkflowTemplatesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListWorkflowTemplatesAsyncPager: + r"""Lists workflows that match the specified filter in + the request. + + Args: + request (:class:`~.workflow_templates.ListWorkflowTemplatesRequest`): + The request object. A request to list workflow templates + in a project. + parent (:class:`str`): + Required. The resource name of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates,list``, the + resource name of the region has the following format: + ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.workflowTemplates.list``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListWorkflowTemplatesAsyncPager: + A response to a request to list + workflow templates in a project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = workflow_templates.ListWorkflowTemplatesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_workflow_templates, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListWorkflowTemplatesAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_workflow_template( + self, + request: workflow_templates.DeleteWorkflowTemplateRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a workflow template. It does not cancel in- + rogress workflows. + + Args: + request (:class:`~.workflow_templates.DeleteWorkflowTemplateRequest`): + The request object. A request to delete a workflow + template. + Currently started workflows will remain running. + name (:class:`str`): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates.delete``, + the resource name of the template has the following + format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For + ``projects.locations.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = workflow_templates.DeleteWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
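+        # The defaults below retry only transient UNAVAILABLE errors, with
+        # exponential backoff (0.1s initial delay, 1.3x multiplier, capped at
+        # 60s between attempts) and an overall 600s default timeout.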
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_workflow_template, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=600.0, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + +try: + _client_info = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-dataproc",).version, + ) +except pkg_resources.DistributionNotFound: + _client_info = gapic_v1.client_info.ClientInfo() + + +__all__ = ("WorkflowTemplateServiceAsyncClient",) diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/client.py b/google/cloud/dataproc_v1/services/workflow_template_service/client.py new file mode 100644 index 00000000..9b380a9a --- /dev/null +++ b/google/cloud/dataproc_v1/services/workflow_template_service/client.py @@ -0,0 +1,980 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +import os +import re +from typing import Callable, Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation +from google.api_core import operation_async +from google.cloud.dataproc_v1.services.workflow_template_service import pagers +from google.cloud.dataproc_v1.types import workflow_templates +from google.protobuf import empty_pb2 as empty # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + +from .transports.base import WorkflowTemplateServiceTransport +from .transports.grpc import WorkflowTemplateServiceGrpcTransport +from .transports.grpc_asyncio import WorkflowTemplateServiceGrpcAsyncIOTransport + + +class WorkflowTemplateServiceClientMeta(type): + """Metaclass for the WorkflowTemplateService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
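+
+    In this module it registers the ``grpc`` and ``grpc_asyncio`` transports
+    and resolves a requested label to the matching class via
+    ``get_transport_class``.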
+ """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[WorkflowTemplateServiceTransport]] + _transport_registry["grpc"] = WorkflowTemplateServiceGrpcTransport + _transport_registry["grpc_asyncio"] = WorkflowTemplateServiceGrpcAsyncIOTransport + + def get_transport_class( + cls, label: str = None, + ) -> Type[WorkflowTemplateServiceTransport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class WorkflowTemplateServiceClient(metaclass=WorkflowTemplateServiceClientMeta): + """The API interface for managing Workflow Templates in the + Dataproc API. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Convert api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "dataproc.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + {@api.name}: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @staticmethod + def workflow_template_path( + project: str, region: str, workflow_template: str, + ) -> str: + """Return a fully-qualified workflow_template string.""" + return "projects/{project}/regions/{region}/workflowTemplates/{workflow_template}".format( + project=project, region=region, workflow_template=workflow_template, + ) + + @staticmethod + def parse_workflow_template_path(path: str) -> Dict[str, str]: + """Parse a workflow_template path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/regions/(?P.+?)/workflowTemplates/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, WorkflowTemplateServiceTransport] = None, + client_options: ClientOptions = None, + ) -> None: + """Instantiate the workflow template service client. 
+ + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.WorkflowTemplateServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint, this is the default value for + the environment variable) and "auto" (auto switch to the default + mTLS endpoint if client SSL credentials is present). However, + the ``api_endpoint`` property takes precedence if provided. + (2) The ``client_cert_source`` property is used to provide client + SSL credentials for mutual TLS transport. If not provided, the + default SSL credentials will be used if present. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = ClientOptions.from_dict(client_options) + if client_options is None: + client_options = ClientOptions.ClientOptions() + + if client_options.api_endpoint is None: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") + if use_mtls_env == "never": + client_options.api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + has_client_cert_source = ( + client_options.client_cert_source is not None + or mtls.has_default_client_cert_source() + ) + client_options.api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT + if has_client_cert_source + else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, WorkflowTemplateServiceTransport): + # transport is a WorkflowTemplateServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=client_options.api_endpoint, + scopes=client_options.scopes, + api_mtls_endpoint=client_options.api_endpoint, + client_cert_source=client_options.client_cert_source, + quota_project_id=client_options.quota_project_id, + ) + + def create_workflow_template( + self, + request: workflow_templates.CreateWorkflowTemplateRequest = None, + *, + parent: str = None, + template: workflow_templates.WorkflowTemplate = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> workflow_templates.WorkflowTemplate: + r"""Creates new workflow template. + + Args: + request (:class:`~.workflow_templates.CreateWorkflowTemplateRequest`): + The request object. A request to create a workflow + template. + parent (:class:`str`): + Required. The resource name of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates,create``, + the resource name of the region has the following + format: ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.workflowTemplates.create``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + template (:class:`~.workflow_templates.WorkflowTemplate`): + Required. The Dataproc workflow + template to create. + This corresponds to the ``template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.workflow_templates.WorkflowTemplate: + A Dataproc workflow template + resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, template]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a workflow_templates.CreateWorkflowTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, workflow_templates.CreateWorkflowTemplateRequest): + request = workflow_templates.CreateWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if template is not None: + request.template = template + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_workflow_template] + + # Certain fields should be provided within the metadata header; + # add these here. 
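+        # The routing header (``x-goog-request-params``) is built from the
+        # request's ``parent`` field so the backend can route the call to the
+        # correct region or location.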
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_workflow_template( + self, + request: workflow_templates.GetWorkflowTemplateRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> workflow_templates.WorkflowTemplate: + r"""Retrieves the latest workflow template. + Can retrieve previously instantiated template by + specifying optional version parameter. + + Args: + request (:class:`~.workflow_templates.GetWorkflowTemplateRequest`): + The request object. A request to fetch a workflow + template. + name (:class:`str`): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates.get``, the + resource name of the template has the following + format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates.get``, the + resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.workflow_templates.WorkflowTemplate: + A Dataproc workflow template + resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a workflow_templates.GetWorkflowTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, workflow_templates.GetWorkflowTemplateRequest): + request = workflow_templates.GetWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_workflow_template] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
+ return response + + def instantiate_workflow_template( + self, + request: workflow_templates.InstantiateWorkflowTemplateRequest = None, + *, + name: str = None, + parameters: Sequence[ + workflow_templates.InstantiateWorkflowTemplateRequest.ParametersEntry + ] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Instantiates a template and begins execution. + + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. + + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Args: + request (:class:`~.workflow_templates.InstantiateWorkflowTemplateRequest`): + The request object. A request to instantiate a workflow + template. + name (:class:`str`): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For + ``projects.regions.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For + ``projects.locations.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parameters (:class:`Sequence[~.workflow_templates.InstantiateWorkflowTemplateRequest.ParametersEntry]`): + Optional. Map from parameter names to + values that should be used for those + parameters. Values may not exceed 100 + characters. + This corresponds to the ``parameters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
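+        # Callers either pass a fully-formed request object or the flattened
+        # ``name``/``parameters`` keyword arguments, never both.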
+ has_flattened_params = any([name, parameters]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a workflow_templates.InstantiateWorkflowTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, workflow_templates.InstantiateWorkflowTemplateRequest + ): + request = workflow_templates.InstantiateWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if parameters is not None: + request.parameters = parameters + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.instantiate_workflow_template + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty.Empty, + metadata_type=workflow_templates.WorkflowMetadata, + ) + + # Done; return the response. + return response + + def instantiate_inline_workflow_template( + self, + request: workflow_templates.InstantiateInlineWorkflowTemplateRequest = None, + *, + parent: str = None, + template: workflow_templates.WorkflowTemplate = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Instantiates a template and begins execution. + + This method is equivalent to executing the sequence + [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate], + [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate], + [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate]. + + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. + + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Args: + request (:class:`~.workflow_templates.InstantiateInlineWorkflowTemplateRequest`): + The request object. A request to instantiate an inline + workflow template. + parent (:class:`str`): + Required. The resource name of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. 
+ + - For + ``projects.regions.workflowTemplates,instantiateinline``, + the resource name of the region has the following + format: ``projects/{project_id}/regions/{region}`` + + - For + ``projects.locations.workflowTemplates.instantiateinline``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + template (:class:`~.workflow_templates.WorkflowTemplate`): + Required. The workflow template to + instantiate. + This corresponds to the ``template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, template]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a workflow_templates.InstantiateInlineWorkflowTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, workflow_templates.InstantiateInlineWorkflowTemplateRequest + ): + request = workflow_templates.InstantiateInlineWorkflowTemplateRequest( + request + ) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if template is not None: + request.template = template + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.instantiate_inline_workflow_template + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty.Empty, + metadata_type=workflow_templates.WorkflowMetadata, + ) + + # Done; return the response. 
+ return response + + def update_workflow_template( + self, + request: workflow_templates.UpdateWorkflowTemplateRequest = None, + *, + template: workflow_templates.WorkflowTemplate = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> workflow_templates.WorkflowTemplate: + r"""Updates (replaces) workflow template. The updated + template must contain version that matches the current + server version. + + Args: + request (:class:`~.workflow_templates.UpdateWorkflowTemplateRequest`): + The request object. A request to update a workflow + template. + template (:class:`~.workflow_templates.WorkflowTemplate`): + Required. The updated workflow template. + + The ``template.version`` field must match the current + version. + This corresponds to the ``template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.workflow_templates.WorkflowTemplate: + A Dataproc workflow template + resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([template]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a workflow_templates.UpdateWorkflowTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, workflow_templates.UpdateWorkflowTemplateRequest): + request = workflow_templates.UpdateWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if template is not None: + request.template = template + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_workflow_template] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("template.name", request.template.name),) + ), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_workflow_templates( + self, + request: workflow_templates.ListWorkflowTemplatesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListWorkflowTemplatesPager: + r"""Lists workflows that match the specified filter in + the request. + + Args: + request (:class:`~.workflow_templates.ListWorkflowTemplatesRequest`): + The request object. A request to list workflow templates + in a project. + parent (:class:`str`): + Required. The resource name of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. 
+ + - For ``projects.regions.workflowTemplates,list``, the + resource name of the region has the following format: + ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.workflowTemplates.list``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListWorkflowTemplatesPager: + A response to a request to list + workflow templates in a project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a workflow_templates.ListWorkflowTemplatesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, workflow_templates.ListWorkflowTemplatesRequest): + request = workflow_templates.ListWorkflowTemplatesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_workflow_templates] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListWorkflowTemplatesPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_workflow_template( + self, + request: workflow_templates.DeleteWorkflowTemplateRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a workflow template. It does not cancel in- + rogress workflows. + + Args: + request (:class:`~.workflow_templates.DeleteWorkflowTemplateRequest`): + The request object. A request to delete a workflow + template. + Currently started workflows will remain running. + name (:class:`str`): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. 
+ + - For ``projects.regions.workflowTemplates.delete``, + the resource name of the template has the following + format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For + ``projects.locations.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a workflow_templates.DeleteWorkflowTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, workflow_templates.DeleteWorkflowTemplateRequest): + request = workflow_templates.DeleteWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_workflow_template] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + +try: + _client_info = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-dataproc",).version, + ) +except pkg_resources.DistributionNotFound: + _client_info = gapic_v1.client_info.ClientInfo() + + +__all__ = ("WorkflowTemplateServiceClient",) diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/pagers.py b/google/cloud/dataproc_v1/services/workflow_template_service/pagers.py new file mode 100644 index 00000000..86a35f48 --- /dev/null +++ b/google/cloud/dataproc_v1/services/workflow_template_service/pagers.py @@ -0,0 +1,152 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple + +from google.cloud.dataproc_v1.types import workflow_templates + + +class ListWorkflowTemplatesPager: + """A pager for iterating through ``list_workflow_templates`` requests. + + This class thinly wraps an initial + :class:`~.workflow_templates.ListWorkflowTemplatesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``templates`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListWorkflowTemplates`` requests and continue to iterate + through the ``templates`` field on the + corresponding responses. + + All the usual :class:`~.workflow_templates.ListWorkflowTemplatesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., workflow_templates.ListWorkflowTemplatesResponse], + request: workflow_templates.ListWorkflowTemplatesRequest, + response: workflow_templates.ListWorkflowTemplatesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.workflow_templates.ListWorkflowTemplatesRequest`): + The initial request object. + response (:class:`~.workflow_templates.ListWorkflowTemplatesResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = workflow_templates.ListWorkflowTemplatesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[workflow_templates.ListWorkflowTemplatesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[workflow_templates.WorkflowTemplate]: + for page in self.pages: + yield from page.templates + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListWorkflowTemplatesAsyncPager: + """A pager for iterating through ``list_workflow_templates`` requests. + + This class thinly wraps an initial + :class:`~.workflow_templates.ListWorkflowTemplatesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``templates`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListWorkflowTemplates`` requests and continue to iterate + through the ``templates`` field on the + corresponding responses. + + All the usual :class:`~.workflow_templates.ListWorkflowTemplatesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[workflow_templates.ListWorkflowTemplatesResponse] + ], + request: workflow_templates.ListWorkflowTemplatesRequest, + response: workflow_templates.ListWorkflowTemplatesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. 
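+
+        In normal use this pager is returned by
+        ``WorkflowTemplateServiceAsyncClient.list_workflow_templates`` rather
+        than built directly. An iteration sketch, for illustration only (the
+        parent value is a placeholder)::
+
+            pager = await client.list_workflow_templates(
+                parent="projects/my-project/regions/us-central1"
+            )
+            async for template in pager:
+                print(template.id)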
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.workflow_templates.ListWorkflowTemplatesRequest`): + The initial request object. + response (:class:`~.workflow_templates.ListWorkflowTemplatesResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = workflow_templates.ListWorkflowTemplatesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[workflow_templates.ListWorkflowTemplatesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[workflow_templates.WorkflowTemplate]: + async def async_generator(): + async for page in self.pages: + for response in page.templates: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/transports/__init__.py b/google/cloud/dataproc_v1/services/workflow_template_service/transports/__init__.py new file mode 100644 index 00000000..eb32b364 --- /dev/null +++ b/google/cloud/dataproc_v1/services/workflow_template_service/transports/__init__.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import WorkflowTemplateServiceTransport +from .grpc import WorkflowTemplateServiceGrpcTransport +from .grpc_asyncio import WorkflowTemplateServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = ( + OrderedDict() +) # type: Dict[str, Type[WorkflowTemplateServiceTransport]] +_transport_registry["grpc"] = WorkflowTemplateServiceGrpcTransport +_transport_registry["grpc_asyncio"] = WorkflowTemplateServiceGrpcAsyncIOTransport + + +__all__ = ( + "WorkflowTemplateServiceTransport", + "WorkflowTemplateServiceGrpcTransport", + "WorkflowTemplateServiceGrpcAsyncIOTransport", +) diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py b/google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py new file mode 100644 index 00000000..a1bc72b0 --- /dev/null +++ b/google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py @@ -0,0 +1,272 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import abc +import typing +import pkg_resources + +from google import auth +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.dataproc_v1.types import workflow_templates +from google.longrunning import operations_pb2 as operations # type: ignore +from google.protobuf import empty_pb2 as empty # type: ignore + + +try: + _client_info = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-dataproc",).version, + ) +except pkg_resources.DistributionNotFound: + _client_info = gapic_v1.client_info.ClientInfo() + + +class WorkflowTemplateServiceTransport(abc.ABC): + """Abstract transport class for WorkflowTemplateService.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + def __init__( + self, + *, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scope (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=scopes, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = auth.default( + scopes=scopes, quota_project_id=quota_project_id + ) + + # Save the credentials. + self._credentials = credentials + + # Lifted into its own function so it can be stubbed out during tests. + self._prep_wrapped_messages() + + def _prep_wrapped_messages(self): + # Precompute the wrapped methods. 
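+        # Every RPC below gets exponential-backoff retries on transient
+        # UNAVAILABLE errors and a 600s default timeout; the read-only
+        # get/list RPCs additionally retry INTERNAL and DEADLINE_EXCEEDED.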
+ self._wrapped_methods = { + self.create_workflow_template: gapic_v1.method.wrap_method( + self.create_workflow_template, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=600.0, + client_info=_client_info, + ), + self.get_workflow_template: gapic_v1.method.wrap_method( + self.get_workflow_template, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=_client_info, + ), + self.instantiate_workflow_template: gapic_v1.method.wrap_method( + self.instantiate_workflow_template, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=600.0, + client_info=_client_info, + ), + self.instantiate_inline_workflow_template: gapic_v1.method.wrap_method( + self.instantiate_inline_workflow_template, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=600.0, + client_info=_client_info, + ), + self.update_workflow_template: gapic_v1.method.wrap_method( + self.update_workflow_template, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=600.0, + client_info=_client_info, + ), + self.list_workflow_templates: gapic_v1.method.wrap_method( + self.list_workflow_templates, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=_client_info, + ), + self.delete_workflow_template: gapic_v1.method.wrap_method( + self.delete_workflow_template, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=600.0, + client_info=_client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_workflow_template( + self, + ) -> typing.Callable[ + [workflow_templates.CreateWorkflowTemplateRequest], + typing.Union[ + workflow_templates.WorkflowTemplate, + typing.Awaitable[workflow_templates.WorkflowTemplate], + ], + ]: + raise NotImplementedError() + + @property + def get_workflow_template( + self, + ) -> typing.Callable[ + [workflow_templates.GetWorkflowTemplateRequest], + typing.Union[ + workflow_templates.WorkflowTemplate, + typing.Awaitable[workflow_templates.WorkflowTemplate], + ], + ]: + raise NotImplementedError() + + @property + def instantiate_workflow_template( + self, + ) -> typing.Callable[ + [workflow_templates.InstantiateWorkflowTemplateRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def instantiate_inline_workflow_template( + self, + ) -> typing.Callable[ + [workflow_templates.InstantiateInlineWorkflowTemplateRequest], + typing.Union[operations.Operation, 
typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def update_workflow_template( + self, + ) -> typing.Callable[ + [workflow_templates.UpdateWorkflowTemplateRequest], + typing.Union[ + workflow_templates.WorkflowTemplate, + typing.Awaitable[workflow_templates.WorkflowTemplate], + ], + ]: + raise NotImplementedError() + + @property + def list_workflow_templates( + self, + ) -> typing.Callable[ + [workflow_templates.ListWorkflowTemplatesRequest], + typing.Union[ + workflow_templates.ListWorkflowTemplatesResponse, + typing.Awaitable[workflow_templates.ListWorkflowTemplatesResponse], + ], + ]: + raise NotImplementedError() + + @property + def delete_workflow_template( + self, + ) -> typing.Callable[ + [workflow_templates.DeleteWorkflowTemplateRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + +__all__ = ("WorkflowTemplateServiceTransport",) diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py b/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py new file mode 100644 index 00000000..5a9e8b61 --- /dev/null +++ b/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py @@ -0,0 +1,482 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + + +import grpc # type: ignore + +from google.cloud.dataproc_v1.types import workflow_templates +from google.longrunning import operations_pb2 as operations # type: ignore +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import WorkflowTemplateServiceTransport + + +class WorkflowTemplateServiceGrpcTransport(WorkflowTemplateServiceTransport): + """gRPC backend transport for WorkflowTemplateService. + + The API interface for managing Workflow Templates in the + Dataproc API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. 
+ credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If + provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A + callback to provide client SSL certificate bytes and private key + bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` + is None. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + self._stubs = {} # type: Dict[str, Callable] + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + @classmethod + def create_channel( + cls, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + address (Optionsl[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. 
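Not part of the patch, only an illustration of the channel-injection branch above: when a ready-made channel is passed, the transport ignores the credential arguments and uses the channel as-is. A sketch, assuming the generated transports/__init__.py re-exports the class and that application default credentials are available:

    from google.cloud import dataproc_v1
    from google.cloud.dataproc_v1.services.workflow_template_service.transports import (
        WorkflowTemplateServiceGrpcTransport,
    )

    # Build the channel with the transport's own helper (it falls back to
    # application default credentials), then hand it to the transport; the
    # `if channel:` branch above adopts it verbatim.
    channel = WorkflowTemplateServiceGrpcTransport.create_channel(
        "dataproc.googleapis.com",
        scopes=("https://www.googleapis.com/auth/cloud-platform",),
    )
    transport = WorkflowTemplateServiceGrpcTransport(channel=channel)
    client = dataproc_v1.WorkflowTemplateServiceClient(transport=transport)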
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Sanity check: Only create a new channel if we do not already + # have one. + if not hasattr(self, "_grpc_channel"): + self._grpc_channel = self.create_channel( + self._host, credentials=self._credentials, + ) + + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if "operations_client" not in self.__dict__: + self.__dict__["operations_client"] = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self.__dict__["operations_client"] + + @property + def create_workflow_template( + self, + ) -> Callable[ + [workflow_templates.CreateWorkflowTemplateRequest], + workflow_templates.WorkflowTemplate, + ]: + r"""Return a callable for the create workflow template method over gRPC. + + Creates new workflow template. + + Returns: + Callable[[~.CreateWorkflowTemplateRequest], + ~.WorkflowTemplate]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_workflow_template" not in self._stubs: + self._stubs["create_workflow_template"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.WorkflowTemplateService/CreateWorkflowTemplate", + request_serializer=workflow_templates.CreateWorkflowTemplateRequest.serialize, + response_deserializer=workflow_templates.WorkflowTemplate.deserialize, + ) + return self._stubs["create_workflow_template"] + + @property + def get_workflow_template( + self, + ) -> Callable[ + [workflow_templates.GetWorkflowTemplateRequest], + workflow_templates.WorkflowTemplate, + ]: + r"""Return a callable for the get workflow template method over gRPC. + + Retrieves the latest workflow template. + Can retrieve previously instantiated template by + specifying optional version parameter. + + Returns: + Callable[[~.GetWorkflowTemplateRequest], + ~.WorkflowTemplate]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_workflow_template" not in self._stubs: + self._stubs["get_workflow_template"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.WorkflowTemplateService/GetWorkflowTemplate", + request_serializer=workflow_templates.GetWorkflowTemplateRequest.serialize, + response_deserializer=workflow_templates.WorkflowTemplate.deserialize, + ) + return self._stubs["get_workflow_template"] + + @property + def instantiate_workflow_template( + self, + ) -> Callable[ + [workflow_templates.InstantiateWorkflowTemplateRequest], operations.Operation + ]: + r"""Return a callable for the instantiate workflow template method over gRPC. + + Instantiates a template and begins execution. + + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. + + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Returns: + Callable[[~.InstantiateWorkflowTemplateRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "instantiate_workflow_template" not in self._stubs: + self._stubs[ + "instantiate_workflow_template" + ] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.WorkflowTemplateService/InstantiateWorkflowTemplate", + request_serializer=workflow_templates.InstantiateWorkflowTemplateRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["instantiate_workflow_template"] + + @property + def instantiate_inline_workflow_template( + self, + ) -> Callable[ + [workflow_templates.InstantiateInlineWorkflowTemplateRequest], + operations.Operation, + ]: + r"""Return a callable for the instantiate inline workflow + template method over gRPC. + + Instantiates a template and begins execution. + + This method is equivalent to executing the sequence + [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate], + [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate], + [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate]. + + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. 
+ + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Returns: + Callable[[~.InstantiateInlineWorkflowTemplateRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "instantiate_inline_workflow_template" not in self._stubs: + self._stubs[ + "instantiate_inline_workflow_template" + ] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.WorkflowTemplateService/InstantiateInlineWorkflowTemplate", + request_serializer=workflow_templates.InstantiateInlineWorkflowTemplateRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["instantiate_inline_workflow_template"] + + @property + def update_workflow_template( + self, + ) -> Callable[ + [workflow_templates.UpdateWorkflowTemplateRequest], + workflow_templates.WorkflowTemplate, + ]: + r"""Return a callable for the update workflow template method over gRPC. + + Updates (replaces) workflow template. The updated + template must contain version that matches the current + server version. + + Returns: + Callable[[~.UpdateWorkflowTemplateRequest], + ~.WorkflowTemplate]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_workflow_template" not in self._stubs: + self._stubs["update_workflow_template"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.WorkflowTemplateService/UpdateWorkflowTemplate", + request_serializer=workflow_templates.UpdateWorkflowTemplateRequest.serialize, + response_deserializer=workflow_templates.WorkflowTemplate.deserialize, + ) + return self._stubs["update_workflow_template"] + + @property + def list_workflow_templates( + self, + ) -> Callable[ + [workflow_templates.ListWorkflowTemplatesRequest], + workflow_templates.ListWorkflowTemplatesResponse, + ]: + r"""Return a callable for the list workflow templates method over gRPC. + + Lists workflows that match the specified filter in + the request. + + Returns: + Callable[[~.ListWorkflowTemplatesRequest], + ~.ListWorkflowTemplatesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_workflow_templates" not in self._stubs: + self._stubs["list_workflow_templates"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.WorkflowTemplateService/ListWorkflowTemplates", + request_serializer=workflow_templates.ListWorkflowTemplatesRequest.serialize, + response_deserializer=workflow_templates.ListWorkflowTemplatesResponse.deserialize, + ) + return self._stubs["list_workflow_templates"] + + @property + def delete_workflow_template( + self, + ) -> Callable[[workflow_templates.DeleteWorkflowTemplateRequest], empty.Empty]: + r"""Return a callable for the delete workflow template method over gRPC. + + Deletes a workflow template. It does not cancel in- + rogress workflows. + + Returns: + Callable[[~.DeleteWorkflowTemplateRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_workflow_template" not in self._stubs: + self._stubs["delete_workflow_template"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.WorkflowTemplateService/DeleteWorkflowTemplate", + request_serializer=workflow_templates.DeleteWorkflowTemplateRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_workflow_template"] + + +__all__ = ("WorkflowTemplateServiceGrpcTransport",) diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py b/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py new file mode 100644 index 00000000..9e3f6355 --- /dev/null +++ b/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py @@ -0,0 +1,478 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.dataproc_v1.types import workflow_templates +from google.longrunning import operations_pb2 as operations # type: ignore +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import WorkflowTemplateServiceTransport +from .grpc import WorkflowTemplateServiceGrpcTransport + + +class WorkflowTemplateServiceGrpcAsyncIOTransport(WorkflowTemplateServiceTransport): + """gRPC AsyncIO backend transport for WorkflowTemplateService. + + The API interface for managing Workflow Templates in the + Dataproc API. 
+ + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + address (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If + provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. 
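This AsyncIO transport backs the WorkflowTemplateServiceAsyncClient added elsewhere in this patch. Purely as an illustration (not patch content), usage mirrors the synchronous client but every call is awaited; a sketch with a placeholder template name, importing the async client from its service package:

    import asyncio

    from google.cloud.dataproc_v1.services.workflow_template_service import (
        WorkflowTemplateServiceAsyncClient,
    )


    async def main():
        client = WorkflowTemplateServiceAsyncClient()
        template = await client.get_workflow_template(
            name="projects/my-project/regions/us-central1/workflowTemplates/my-template",
        )
        print(template.name)


    asyncio.run(main())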
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A + callback to provide client SSL certificate bytes and private key + bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` + is None. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + self._stubs = {} + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Sanity check: Only create a new channel if we do not already + # have one. + if not hasattr(self, "_grpc_channel"): + self._grpc_channel = self.create_channel( + self._host, credentials=self._credentials, + ) + + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if "operations_client" not in self.__dict__: + self.__dict__["operations_client"] = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self.__dict__["operations_client"] + + @property + def create_workflow_template( + self, + ) -> Callable[ + [workflow_templates.CreateWorkflowTemplateRequest], + Awaitable[workflow_templates.WorkflowTemplate], + ]: + r"""Return a callable for the create workflow template method over gRPC. + + Creates new workflow template. + + Returns: + Callable[[~.CreateWorkflowTemplateRequest], + Awaitable[~.WorkflowTemplate]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_workflow_template" not in self._stubs: + self._stubs["create_workflow_template"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.WorkflowTemplateService/CreateWorkflowTemplate", + request_serializer=workflow_templates.CreateWorkflowTemplateRequest.serialize, + response_deserializer=workflow_templates.WorkflowTemplate.deserialize, + ) + return self._stubs["create_workflow_template"] + + @property + def get_workflow_template( + self, + ) -> Callable[ + [workflow_templates.GetWorkflowTemplateRequest], + Awaitable[workflow_templates.WorkflowTemplate], + ]: + r"""Return a callable for the get workflow template method over gRPC. + + Retrieves the latest workflow template. + Can retrieve previously instantiated template by + specifying optional version parameter. + + Returns: + Callable[[~.GetWorkflowTemplateRequest], + Awaitable[~.WorkflowTemplate]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_workflow_template" not in self._stubs: + self._stubs["get_workflow_template"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.WorkflowTemplateService/GetWorkflowTemplate", + request_serializer=workflow_templates.GetWorkflowTemplateRequest.serialize, + response_deserializer=workflow_templates.WorkflowTemplate.deserialize, + ) + return self._stubs["get_workflow_template"] + + @property + def instantiate_workflow_template( + self, + ) -> Callable[ + [workflow_templates.InstantiateWorkflowTemplateRequest], + Awaitable[operations.Operation], + ]: + r"""Return a callable for the instantiate workflow template method over gRPC. + + Instantiates a template and begins execution. + + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. + + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Returns: + Callable[[~.InstantiateWorkflowTemplateRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "instantiate_workflow_template" not in self._stubs: + self._stubs[ + "instantiate_workflow_template" + ] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.WorkflowTemplateService/InstantiateWorkflowTemplate", + request_serializer=workflow_templates.InstantiateWorkflowTemplateRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["instantiate_workflow_template"] + + @property + def instantiate_inline_workflow_template( + self, + ) -> Callable[ + [workflow_templates.InstantiateInlineWorkflowTemplateRequest], + Awaitable[operations.Operation], + ]: + r"""Return a callable for the instantiate inline workflow + template method over gRPC. + + Instantiates a template and begins execution. + + This method is equivalent to executing the sequence + [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate], + [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate], + [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate]. + + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. + + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Returns: + Callable[[~.InstantiateInlineWorkflowTemplateRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "instantiate_inline_workflow_template" not in self._stubs: + self._stubs[ + "instantiate_inline_workflow_template" + ] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.WorkflowTemplateService/InstantiateInlineWorkflowTemplate", + request_serializer=workflow_templates.InstantiateInlineWorkflowTemplateRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["instantiate_inline_workflow_template"] + + @property + def update_workflow_template( + self, + ) -> Callable[ + [workflow_templates.UpdateWorkflowTemplateRequest], + Awaitable[workflow_templates.WorkflowTemplate], + ]: + r"""Return a callable for the update workflow template method over gRPC. + + Updates (replaces) workflow template. The updated + template must contain version that matches the current + server version. + + Returns: + Callable[[~.UpdateWorkflowTemplateRequest], + Awaitable[~.WorkflowTemplate]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "update_workflow_template" not in self._stubs: + self._stubs["update_workflow_template"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.WorkflowTemplateService/UpdateWorkflowTemplate", + request_serializer=workflow_templates.UpdateWorkflowTemplateRequest.serialize, + response_deserializer=workflow_templates.WorkflowTemplate.deserialize, + ) + return self._stubs["update_workflow_template"] + + @property + def list_workflow_templates( + self, + ) -> Callable[ + [workflow_templates.ListWorkflowTemplatesRequest], + Awaitable[workflow_templates.ListWorkflowTemplatesResponse], + ]: + r"""Return a callable for the list workflow templates method over gRPC. + + Lists workflows that match the specified filter in + the request. + + Returns: + Callable[[~.ListWorkflowTemplatesRequest], + Awaitable[~.ListWorkflowTemplatesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_workflow_templates" not in self._stubs: + self._stubs["list_workflow_templates"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.WorkflowTemplateService/ListWorkflowTemplates", + request_serializer=workflow_templates.ListWorkflowTemplatesRequest.serialize, + response_deserializer=workflow_templates.ListWorkflowTemplatesResponse.deserialize, + ) + return self._stubs["list_workflow_templates"] + + @property + def delete_workflow_template( + self, + ) -> Callable[ + [workflow_templates.DeleteWorkflowTemplateRequest], Awaitable[empty.Empty] + ]: + r"""Return a callable for the delete workflow template method over gRPC. + + Deletes a workflow template. It does not cancel in- + rogress workflows. + + Returns: + Callable[[~.DeleteWorkflowTemplateRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_workflow_template" not in self._stubs: + self._stubs["delete_workflow_template"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.WorkflowTemplateService/DeleteWorkflowTemplate", + request_serializer=workflow_templates.DeleteWorkflowTemplateRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_workflow_template"] + + +__all__ = ("WorkflowTemplateServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/dataproc_v1/types.py b/google/cloud/dataproc_v1/types.py deleted file mode 100644 index ea3e3add..00000000 --- a/google/cloud/dataproc_v1/types.py +++ /dev/null @@ -1,68 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from __future__ import absolute_import -import sys - -from google.api_core.protobuf_helpers import get_messages - -from google.cloud.dataproc_v1.proto import autoscaling_policies_pb2 -from google.cloud.dataproc_v1.proto import clusters_pb2 -from google.cloud.dataproc_v1.proto import jobs_pb2 -from google.cloud.dataproc_v1.proto import operations_pb2 as proto_operations_pb2 -from google.cloud.dataproc_v1.proto import workflow_templates_pb2 -from google.longrunning import operations_pb2 as longrunning_operations_pb2 -from google.protobuf import any_pb2 -from google.protobuf import duration_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 -from google.protobuf import timestamp_pb2 -from google.rpc import status_pb2 - - -_shared_modules = [ - longrunning_operations_pb2, - any_pb2, - duration_pb2, - empty_pb2, - field_mask_pb2, - timestamp_pb2, - status_pb2, -] - -_local_modules = [ - autoscaling_policies_pb2, - clusters_pb2, - jobs_pb2, - proto_operations_pb2, - workflow_templates_pb2, -] - -names = [] - -for module in _shared_modules: # pragma: NO COVER - for name, message in get_messages(module).items(): - setattr(sys.modules[__name__], name, message) - names.append(name) -for module in _local_modules: - for name, message in get_messages(module).items(): - message.__module__ = "google.cloud.dataproc_v1.types" - setattr(sys.modules[__name__], name, message) - names.append(name) - - -__all__ = tuple(sorted(names)) diff --git a/google/cloud/dataproc_v1/types/__init__.py b/google/cloud/dataproc_v1/types/__init__.py new file mode 100644 index 00000000..bfcbf982 --- /dev/null +++ b/google/cloud/dataproc_v1/types/__init__.py @@ -0,0 +1,197 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
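With the hand-rolled google/cloud/dataproc_v1/types.py module (deleted above) gone, every message is now a proto-plus class re-exported from this types package. Not part of the patch, just a sketch of the new import surface; the resource values are placeholders:

    from google.cloud.dataproc_v1 import types

    # proto-plus messages accept keyword arguments, and nested messages can
    # be given as plain dicts.
    cluster = types.Cluster(
        project_id="my-project",
        cluster_name="example-cluster",
        config={"gce_cluster_config": {"zone_uri": "us-central1-f"}},
    )
    print(cluster.cluster_name, cluster.config.gce_cluster_config.zone_uri)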
+# + +from .autoscaling_policies import ( + AutoscalingPolicy, + BasicAutoscalingAlgorithm, + BasicYarnAutoscalingConfig, + InstanceGroupAutoscalingPolicyConfig, + CreateAutoscalingPolicyRequest, + GetAutoscalingPolicyRequest, + UpdateAutoscalingPolicyRequest, + DeleteAutoscalingPolicyRequest, + ListAutoscalingPoliciesRequest, + ListAutoscalingPoliciesResponse, +) +from .clusters import ( + Cluster, + ClusterConfig, + EndpointConfig, + AutoscalingConfig, + EncryptionConfig, + GceClusterConfig, + InstanceGroupConfig, + ManagedGroupConfig, + AcceleratorConfig, + DiskConfig, + NodeInitializationAction, + ClusterStatus, + SecurityConfig, + KerberosConfig, + SoftwareConfig, + LifecycleConfig, + ClusterMetrics, + CreateClusterRequest, + UpdateClusterRequest, + DeleteClusterRequest, + GetClusterRequest, + ListClustersRequest, + ListClustersResponse, + DiagnoseClusterRequest, + DiagnoseClusterResults, + ReservationAffinity, +) +from .jobs import ( + LoggingConfig, + HadoopJob, + SparkJob, + PySparkJob, + QueryList, + HiveJob, + SparkSqlJob, + PigJob, + SparkRJob, + PrestoJob, + JobPlacement, + JobStatus, + JobReference, + YarnApplication, + Job, + JobScheduling, + SubmitJobRequest, + JobMetadata, + GetJobRequest, + ListJobsRequest, + UpdateJobRequest, + ListJobsResponse, + CancelJobRequest, + DeleteJobRequest, +) +from .operations import ( + ClusterOperationStatus, + ClusterOperationMetadata, +) +from .workflow_templates import ( + WorkflowTemplate, + WorkflowTemplatePlacement, + ManagedCluster, + ClusterSelector, + OrderedJob, + TemplateParameter, + ParameterValidation, + RegexValidation, + ValueValidation, + WorkflowMetadata, + ClusterOperation, + WorkflowGraph, + WorkflowNode, + CreateWorkflowTemplateRequest, + GetWorkflowTemplateRequest, + InstantiateWorkflowTemplateRequest, + InstantiateInlineWorkflowTemplateRequest, + UpdateWorkflowTemplateRequest, + ListWorkflowTemplatesRequest, + ListWorkflowTemplatesResponse, + DeleteWorkflowTemplateRequest, +) + + +__all__ = ( + "AutoscalingPolicy", + "BasicAutoscalingAlgorithm", + "BasicYarnAutoscalingConfig", + "InstanceGroupAutoscalingPolicyConfig", + "CreateAutoscalingPolicyRequest", + "GetAutoscalingPolicyRequest", + "UpdateAutoscalingPolicyRequest", + "DeleteAutoscalingPolicyRequest", + "ListAutoscalingPoliciesRequest", + "ListAutoscalingPoliciesResponse", + "Cluster", + "ClusterConfig", + "EndpointConfig", + "AutoscalingConfig", + "EncryptionConfig", + "GceClusterConfig", + "InstanceGroupConfig", + "ManagedGroupConfig", + "AcceleratorConfig", + "DiskConfig", + "NodeInitializationAction", + "ClusterStatus", + "SecurityConfig", + "KerberosConfig", + "SoftwareConfig", + "LifecycleConfig", + "ClusterMetrics", + "CreateClusterRequest", + "UpdateClusterRequest", + "DeleteClusterRequest", + "GetClusterRequest", + "ListClustersRequest", + "ListClustersResponse", + "DiagnoseClusterRequest", + "DiagnoseClusterResults", + "ReservationAffinity", + "LoggingConfig", + "HadoopJob", + "SparkJob", + "PySparkJob", + "QueryList", + "HiveJob", + "SparkSqlJob", + "PigJob", + "SparkRJob", + "PrestoJob", + "JobPlacement", + "JobStatus", + "JobReference", + "YarnApplication", + "Job", + "JobScheduling", + "SubmitJobRequest", + "JobMetadata", + "GetJobRequest", + "ListJobsRequest", + "UpdateJobRequest", + "ListJobsResponse", + "CancelJobRequest", + "DeleteJobRequest", + "ClusterOperationStatus", + "ClusterOperationMetadata", + "WorkflowTemplate", + "WorkflowTemplatePlacement", + "ManagedCluster", + "ClusterSelector", + "OrderedJob", + "TemplateParameter", + 
"ParameterValidation", + "RegexValidation", + "ValueValidation", + "WorkflowMetadata", + "ClusterOperation", + "WorkflowGraph", + "WorkflowNode", + "CreateWorkflowTemplateRequest", + "GetWorkflowTemplateRequest", + "InstantiateWorkflowTemplateRequest", + "InstantiateInlineWorkflowTemplateRequest", + "UpdateWorkflowTemplateRequest", + "ListWorkflowTemplatesRequest", + "ListWorkflowTemplatesResponse", + "DeleteWorkflowTemplateRequest", +) diff --git a/google/cloud/dataproc_v1/types/autoscaling_policies.py b/google/cloud/dataproc_v1/types/autoscaling_policies.py new file mode 100644 index 00000000..136dc3f7 --- /dev/null +++ b/google/cloud/dataproc_v1/types/autoscaling_policies.py @@ -0,0 +1,365 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import duration_pb2 as duration # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.dataproc.v1", + manifest={ + "AutoscalingPolicy", + "BasicAutoscalingAlgorithm", + "BasicYarnAutoscalingConfig", + "InstanceGroupAutoscalingPolicyConfig", + "CreateAutoscalingPolicyRequest", + "GetAutoscalingPolicyRequest", + "UpdateAutoscalingPolicyRequest", + "DeleteAutoscalingPolicyRequest", + "ListAutoscalingPoliciesRequest", + "ListAutoscalingPoliciesResponse", + }, +) + + +class AutoscalingPolicy(proto.Message): + r"""Describes an autoscaling policy for Dataproc cluster + autoscaler. + + Attributes: + id (str): + Required. The policy id. + + The id must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). Cannot begin or end with + underscore or hyphen. Must consist of between 3 and 50 + characters. + name (str): + Output only. The "resource name" of the autoscaling policy, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies``, the + resource name of the policy has the following format: + ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` + + - For ``projects.locations.autoscalingPolicies``, the + resource name of the policy has the following format: + ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + basic_algorithm (~.autoscaling_policies.BasicAutoscalingAlgorithm): + + worker_config (~.autoscaling_policies.InstanceGroupAutoscalingPolicyConfig): + Required. Describes how the autoscaler will + operate for primary workers. + secondary_worker_config (~.autoscaling_policies.InstanceGroupAutoscalingPolicyConfig): + Optional. Describes how the autoscaler will + operate for secondary workers. 
+ """ + + id = proto.Field(proto.STRING, number=1) + + name = proto.Field(proto.STRING, number=2) + + basic_algorithm = proto.Field( + proto.MESSAGE, number=3, oneof="algorithm", message="BasicAutoscalingAlgorithm", + ) + + worker_config = proto.Field( + proto.MESSAGE, number=4, message="InstanceGroupAutoscalingPolicyConfig", + ) + + secondary_worker_config = proto.Field( + proto.MESSAGE, number=5, message="InstanceGroupAutoscalingPolicyConfig", + ) + + +class BasicAutoscalingAlgorithm(proto.Message): + r"""Basic algorithm for autoscaling. + + Attributes: + yarn_config (~.autoscaling_policies.BasicYarnAutoscalingConfig): + Required. YARN autoscaling configuration. + cooldown_period (~.duration.Duration): + Optional. Duration between scaling events. A scaling period + starts after the update operation from the previous event + has completed. + + Bounds: [2m, 1d]. Default: 2m. + """ + + yarn_config = proto.Field( + proto.MESSAGE, number=1, message="BasicYarnAutoscalingConfig", + ) + + cooldown_period = proto.Field(proto.MESSAGE, number=2, message=duration.Duration,) + + +class BasicYarnAutoscalingConfig(proto.Message): + r"""Basic autoscaling configurations for YARN. + + Attributes: + graceful_decommission_timeout (~.duration.Duration): + Required. Timeout for YARN graceful decommissioning of Node + Managers. Specifies the duration to wait for jobs to + complete before forcefully removing workers (and potentially + interrupting jobs). Only applicable to downscaling + operations. + + Bounds: [0s, 1d]. + scale_up_factor (float): + Required. Fraction of average YARN pending memory in the + last cooldown period for which to add workers. A scale-up + factor of 1.0 will result in scaling up so that there is no + pending memory remaining after the update (more aggressive + scaling). A scale-up factor closer to 0 will result in a + smaller magnitude of scaling up (less aggressive scaling). + See `How autoscaling + works `__ + for more information. + + Bounds: [0.0, 1.0]. + scale_down_factor (float): + Required. Fraction of average YARN pending memory in the + last cooldown period for which to remove workers. A + scale-down factor of 1 will result in scaling down so that + there is no available memory remaining after the update + (more aggressive scaling). A scale-down factor of 0 disables + removing workers, which can be beneficial for autoscaling a + single job. See `How autoscaling + works `__ + for more information. + + Bounds: [0.0, 1.0]. + scale_up_min_worker_fraction (float): + Optional. Minimum scale-up threshold as a fraction of total + cluster size before scaling occurs. For example, in a + 20-worker cluster, a threshold of 0.1 means the autoscaler + must recommend at least a 2-worker scale-up for the cluster + to scale. A threshold of 0 means the autoscaler will scale + up on any recommended change. + + Bounds: [0.0, 1.0]. Default: 0.0. + scale_down_min_worker_fraction (float): + Optional. Minimum scale-down threshold as a fraction of + total cluster size before scaling occurs. For example, in a + 20-worker cluster, a threshold of 0.1 means the autoscaler + must recommend at least a 2 worker scale-down for the + cluster to scale. A threshold of 0 means the autoscaler will + scale down on any recommended change. + + Bounds: [0.0, 1.0]. Default: 0.0. 
+ """ + + graceful_decommission_timeout = proto.Field( + proto.MESSAGE, number=5, message=duration.Duration, + ) + + scale_up_factor = proto.Field(proto.DOUBLE, number=1) + + scale_down_factor = proto.Field(proto.DOUBLE, number=2) + + scale_up_min_worker_fraction = proto.Field(proto.DOUBLE, number=3) + + scale_down_min_worker_fraction = proto.Field(proto.DOUBLE, number=4) + + +class InstanceGroupAutoscalingPolicyConfig(proto.Message): + r"""Configuration for the size bounds of an instance group, + including its proportional size to other groups. + + Attributes: + min_instances (int): + Optional. Minimum number of instances for this group. + + Primary workers - Bounds: [2, max_instances]. Default: 2. + Secondary workers - Bounds: [0, max_instances]. Default: 0. + max_instances (int): + Required. Maximum number of instances for this group. + Required for primary workers. Note that by default, clusters + will not use secondary workers. Required for secondary + workers if the minimum secondary instances is set. + + Primary workers - Bounds: [min_instances, ). Secondary + workers - Bounds: [min_instances, ). Default: 0. + weight (int): + Optional. Weight for the instance group, which is used to + determine the fraction of total workers in the cluster from + this instance group. For example, if primary workers have + weight 2, and secondary workers have weight 1, the cluster + will have approximately 2 primary workers for each secondary + worker. + + The cluster may not reach the specified balance if + constrained by min/max bounds or other autoscaling settings. + For example, if ``max_instances`` for secondary workers is + 0, then only primary workers will be added. The cluster can + also be out of balance when created. + + If weight is not set on any instance group, the cluster will + default to equal weight for all groups: the cluster will + attempt to maintain an equal number of workers in each group + within the configured size bounds for each group. If weight + is set for one group only, the cluster will default to zero + weight on the unset group. For example if weight is set only + on primary workers, the cluster will use primary workers + only and no secondary workers. + """ + + min_instances = proto.Field(proto.INT32, number=1) + + max_instances = proto.Field(proto.INT32, number=2) + + weight = proto.Field(proto.INT32, number=3) + + +class CreateAutoscalingPolicyRequest(proto.Message): + r"""A request to create an autoscaling policy. + + Attributes: + parent (str): + Required. The "resource name" of the region or location, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.create``, the + resource name of the region has the following format: + ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.autoscalingPolicies.create``, + the resource name of the location has the following + format: ``projects/{project_id}/locations/{location}`` + policy (~.autoscaling_policies.AutoscalingPolicy): + Required. The autoscaling policy to create. + """ + + parent = proto.Field(proto.STRING, number=1) + + policy = proto.Field(proto.MESSAGE, number=2, message=AutoscalingPolicy,) + + +class GetAutoscalingPolicyRequest(proto.Message): + r"""A request to fetch an autoscaling policy. + + Attributes: + name (str): + Required. The "resource name" of the autoscaling policy, as + described in + https://cloud.google.com/apis/design/resource_names. 
+ + - For ``projects.regions.autoscalingPolicies.get``, the + resource name of the policy has the following format: + ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` + + - For ``projects.locations.autoscalingPolicies.get``, the + resource name of the policy has the following format: + ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + """ + + name = proto.Field(proto.STRING, number=1) + + +class UpdateAutoscalingPolicyRequest(proto.Message): + r"""A request to update an autoscaling policy. + + Attributes: + policy (~.autoscaling_policies.AutoscalingPolicy): + Required. The updated autoscaling policy. + """ + + policy = proto.Field(proto.MESSAGE, number=1, message=AutoscalingPolicy,) + + +class DeleteAutoscalingPolicyRequest(proto.Message): + r"""A request to delete an autoscaling policy. + Autoscaling policies in use by one or more clusters will not be + deleted. + + Attributes: + name (str): + Required. The "resource name" of the autoscaling policy, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.delete``, the + resource name of the policy has the following format: + ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` + + - For ``projects.locations.autoscalingPolicies.delete``, + the resource name of the policy has the following format: + ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListAutoscalingPoliciesRequest(proto.Message): + r"""A request to list autoscaling policies in a project. + + Attributes: + parent (str): + Required. The "resource name" of the region or location, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.list``, the + resource name of the region has the following format: + ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.autoscalingPolicies.list``, the + resource name of the location has the following format: + ``projects/{project_id}/locations/{location}`` + page_size (int): + Optional. The maximum number of results to + return in each response. Must be less than or + equal to 1000. Defaults to 100. + page_token (str): + Optional. The page token, returned by a + previous call, to request the next page of + results. + """ + + parent = proto.Field(proto.STRING, number=1) + + page_size = proto.Field(proto.INT32, number=2) + + page_token = proto.Field(proto.STRING, number=3) + + +class ListAutoscalingPoliciesResponse(proto.Message): + r"""A response to a request to list autoscaling policies in a + project. + + Attributes: + policies (Sequence[~.autoscaling_policies.AutoscalingPolicy]): + Output only. Autoscaling policies list. + next_page_token (str): + Output only. This token is included in the + response if there are more results to fetch. 
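A non-normative sketch of how these paging messages surface to callers: the generated client wraps the list response in a pager (see the pagers.py files in this patch) that refetches with next_page_token as you iterate. Resource names below are placeholders:

    from google.cloud.dataproc_v1.services.autoscaling_policy_service import (
        AutoscalingPolicyServiceClient,
    )

    client = AutoscalingPolicyServiceClient()

    # Iterating the pager transparently issues follow-up requests whenever
    # a page is exhausted and a next_page_token is present.
    for policy in client.list_autoscaling_policies(
        parent="projects/my-project/regions/us-central1",
    ):
        print(policy.name)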
+ """ + + @property + def raw_page(self): + return self + + policies = proto.RepeatedField(proto.MESSAGE, number=1, message=AutoscalingPolicy,) + + next_page_token = proto.Field(proto.STRING, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/dataproc_v1/types/clusters.py b/google/cloud/dataproc_v1/types/clusters.py new file mode 100644 index 00000000..50c0f5ee --- /dev/null +++ b/google/cloud/dataproc_v1/types/clusters.py @@ -0,0 +1,1234 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.dataproc_v1.types import shared +from google.protobuf import duration_pb2 as duration # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.dataproc.v1", + manifest={ + "Cluster", + "ClusterConfig", + "EndpointConfig", + "AutoscalingConfig", + "EncryptionConfig", + "GceClusterConfig", + "InstanceGroupConfig", + "ManagedGroupConfig", + "AcceleratorConfig", + "DiskConfig", + "NodeInitializationAction", + "ClusterStatus", + "SecurityConfig", + "KerberosConfig", + "SoftwareConfig", + "LifecycleConfig", + "ClusterMetrics", + "CreateClusterRequest", + "UpdateClusterRequest", + "DeleteClusterRequest", + "GetClusterRequest", + "ListClustersRequest", + "ListClustersResponse", + "DiagnoseClusterRequest", + "DiagnoseClusterResults", + "ReservationAffinity", + }, +) + + +class Cluster(proto.Message): + r"""Describes the identifying information, config, and status of + a cluster of Compute Engine instances. + + Attributes: + project_id (str): + Required. The Google Cloud Platform project + ID that the cluster belongs to. + cluster_name (str): + Required. The cluster name. Cluster names + within a project must be unique. Names of + deleted clusters can be reused. + config (~.gcd_clusters.ClusterConfig): + Required. The cluster config. Note that + Dataproc may set default values, and values may + change when clusters are updated. + labels (Sequence[~.gcd_clusters.Cluster.LabelsEntry]): + Optional. The labels to associate with this cluster. Label + **keys** must contain 1 to 63 characters, and must conform + to `RFC 1035 `__. + Label **values** may be empty, but, if present, must contain + 1 to 63 characters, and must conform to `RFC + 1035 `__. No more than + 32 labels can be associated with a cluster. + status (~.gcd_clusters.ClusterStatus): + Output only. Cluster status. + status_history (Sequence[~.gcd_clusters.ClusterStatus]): + Output only. The previous cluster status. + cluster_uuid (str): + Output only. A cluster UUID (Unique Universal + Identifier). Dataproc generates this value when + it creates the cluster. + metrics (~.gcd_clusters.ClusterMetrics): + Output only. Contains cluster daemon metrics such as HDFS + and YARN stats. + + **Beta Feature**: This report is available for testing + purposes only. 
It may be changed before final release. + """ + + project_id = proto.Field(proto.STRING, number=1) + + cluster_name = proto.Field(proto.STRING, number=2) + + config = proto.Field(proto.MESSAGE, number=3, message="ClusterConfig",) + + labels = proto.MapField(proto.STRING, proto.STRING, number=8) + + status = proto.Field(proto.MESSAGE, number=4, message="ClusterStatus",) + + status_history = proto.RepeatedField( + proto.MESSAGE, number=7, message="ClusterStatus", + ) + + cluster_uuid = proto.Field(proto.STRING, number=6) + + metrics = proto.Field(proto.MESSAGE, number=9, message="ClusterMetrics",) + + +class ClusterConfig(proto.Message): + r"""The cluster config. + + Attributes: + config_bucket (str): + Optional. A Cloud Storage bucket used to stage job + dependencies, config files, and job driver console output. + If you do not specify a staging bucket, Cloud Dataproc will + determine a Cloud Storage location (US, ASIA, or EU) for + your cluster's staging bucket according to the Compute + Engine zone where your cluster is deployed, and then create + and manage this project-level, per-location bucket (see + `Dataproc staging + bucket `__). + temp_bucket (str): + Optional. A Cloud Storage bucket used to + store ephemeral cluster and jobs data, such as + Spark and MapReduce history files. If you do not + specify a temp bucket, + Dataproc will determine a Cloud Storage location + (US, ASIA, or EU) for your cluster's temp bucket + according to the Compute Engine zone where your + cluster is deployed, and then create and manage + this project-level, per-location bucket. The + default bucket has a TTL of 90 days, but you can + use any TTL (or none) if you specify a bucket. + gce_cluster_config (~.gcd_clusters.GceClusterConfig): + Optional. The shared Compute Engine config + settings for all instances in a cluster. + master_config (~.gcd_clusters.InstanceGroupConfig): + Optional. The Compute Engine config settings + for the master instance in a cluster. + worker_config (~.gcd_clusters.InstanceGroupConfig): + Optional. The Compute Engine config settings + for worker instances in a cluster. + secondary_worker_config (~.gcd_clusters.InstanceGroupConfig): + Optional. The Compute Engine config settings + for additional worker instances in a cluster. + software_config (~.gcd_clusters.SoftwareConfig): + Optional. The config settings for software + inside the cluster. + initialization_actions (Sequence[~.gcd_clusters.NodeInitializationAction]): + Optional. Commands to execute on each node after config is + completed. By default, executables are run on master and all + worker nodes. You can test a node's ``role`` metadata to run + an executable on a master or worker node, as shown below + using ``curl`` (you can also use ``wget``): + + :: + + ROLE=$(curl -H Metadata-Flavor:Google + http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + if [[ "${ROLE}" == 'Master' ]]; then + ... master specific actions ... + else + ... worker specific actions ... + fi + encryption_config (~.gcd_clusters.EncryptionConfig): + Optional. Encryption settings for the + cluster. + autoscaling_config (~.gcd_clusters.AutoscalingConfig): + Optional. Autoscaling config for the policy + associated with the cluster. Cluster does not + autoscale if this field is unset. + security_config (~.gcd_clusters.SecurityConfig): + Optional. Security settings for the cluster. + lifecycle_config (~.gcd_clusters.LifecycleConfig): + Optional. Lifecycle setting for the cluster. 
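Illustrative only (not part of the patch): a minimal cluster spec exercising a few of the ClusterConfig fields documented above, submitted through the ClusterControllerClient added earlier in this change. Project, region, zone, and machine types are placeholders; machine_type_uri comes from the full InstanceGroupConfig message, which is truncated in this excerpt, and the regional api_endpoint is the usual choice for non-global regions:

    from google.cloud import dataproc_v1
    from google.cloud.dataproc_v1 import types

    client = dataproc_v1.ClusterControllerClient(
        client_options={"api_endpoint": "us-central1-dataproc.googleapis.com:443"},
    )

    cluster = types.Cluster(
        project_id="my-project",
        cluster_name="example-cluster",
        config=types.ClusterConfig(
            gce_cluster_config=types.GceClusterConfig(zone_uri="us-central1-f"),
            master_config=types.InstanceGroupConfig(
                num_instances=1, machine_type_uri="n1-standard-4",
            ),
            worker_config=types.InstanceGroupConfig(
                num_instances=2, machine_type_uri="n1-standard-4",
            ),
        ),
    )

    # CreateCluster is long-running; block until the cluster is provisioned.
    operation = client.create_cluster(
        project_id="my-project", region="us-central1", cluster=cluster,
    )
    print(operation.result().cluster_name)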
+ endpoint_config (~.gcd_clusters.EndpointConfig): + Optional. Port/endpoint configuration for + this cluster + """ + + config_bucket = proto.Field(proto.STRING, number=1) + + temp_bucket = proto.Field(proto.STRING, number=2) + + gce_cluster_config = proto.Field( + proto.MESSAGE, number=8, message="GceClusterConfig", + ) + + master_config = proto.Field(proto.MESSAGE, number=9, message="InstanceGroupConfig",) + + worker_config = proto.Field( + proto.MESSAGE, number=10, message="InstanceGroupConfig", + ) + + secondary_worker_config = proto.Field( + proto.MESSAGE, number=12, message="InstanceGroupConfig", + ) + + software_config = proto.Field(proto.MESSAGE, number=13, message="SoftwareConfig",) + + initialization_actions = proto.RepeatedField( + proto.MESSAGE, number=11, message="NodeInitializationAction", + ) + + encryption_config = proto.Field( + proto.MESSAGE, number=15, message="EncryptionConfig", + ) + + autoscaling_config = proto.Field( + proto.MESSAGE, number=18, message="AutoscalingConfig", + ) + + security_config = proto.Field(proto.MESSAGE, number=16, message="SecurityConfig",) + + lifecycle_config = proto.Field(proto.MESSAGE, number=17, message="LifecycleConfig",) + + endpoint_config = proto.Field(proto.MESSAGE, number=19, message="EndpointConfig",) + + +class EndpointConfig(proto.Message): + r"""Endpoint config for this cluster + + Attributes: + http_ports (Sequence[~.gcd_clusters.EndpointConfig.HttpPortsEntry]): + Output only. The map of port descriptions to URLs. Will only + be populated if enable_http_port_access is true. + enable_http_port_access (bool): + Optional. If true, enable http access to + specific ports on the cluster from external + sources. Defaults to false. + """ + + http_ports = proto.MapField(proto.STRING, proto.STRING, number=1) + + enable_http_port_access = proto.Field(proto.BOOL, number=2) + + +class AutoscalingConfig(proto.Message): + r"""Autoscaling Policy config associated with the cluster. + + Attributes: + policy_uri (str): + Optional. The autoscaling policy used by the cluster. + + Only resource names including projectid and location + (region) are valid. Examples: + + - ``https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`` + - ``projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`` + + Note that the policy must be in the same project and + Dataproc region. + """ + + policy_uri = proto.Field(proto.STRING, number=1) + + +class EncryptionConfig(proto.Message): + r"""Encryption settings for the cluster. + + Attributes: + gce_pd_kms_key_name (str): + Optional. The Cloud KMS key name to use for + PD disk encryption for all instances in the + cluster. + """ + + gce_pd_kms_key_name = proto.Field(proto.STRING, number=1) + + +class GceClusterConfig(proto.Message): + r"""Common config settings for resources of Compute Engine + cluster instances, applicable to all instances in the cluster. + + Attributes: + zone_uri (str): + Optional. The zone where the Compute Engine cluster will be + located. On a create request, it is required in the "global" + region. If omitted in a non-global Dataproc region, the + service will pick a zone in the corresponding Compute Engine + region. On a get request, zone will always be present. + + A full URL, partial URI, or short name are valid. Examples: + + - ``https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`` + - ``projects/[project_id]/zones/[zone]`` + - ``us-central1-f`` + network_uri (str): + Optional. 
The Compute Engine network to be used for machine + communications. Cannot be specified with subnetwork_uri. If + neither ``network_uri`` nor ``subnetwork_uri`` is specified, + the "default" network of the project is used, if it exists. + Cannot be a "Custom Subnet Network" (see `Using + Subnetworks `__ + for more information). + + A full URL, partial URI, or short name are valid. Examples: + + - ``https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`` + - ``projects/[project_id]/regions/global/default`` + - ``default`` + subnetwork_uri (str): + Optional. The Compute Engine subnetwork to be used for + machine communications. Cannot be specified with + network_uri. + + A full URL, partial URI, or short name are valid. Examples: + + - ``https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/subnetworks/sub0`` + - ``projects/[project_id]/regions/us-east1/subnetworks/sub0`` + - ``sub0`` + internal_ip_only (bool): + Optional. If true, all instances in the cluster will only + have internal IP addresses. By default, clusters are not + restricted to internal IP addresses, and will have ephemeral + external IP addresses assigned to each instance. This + ``internal_ip_only`` restriction can only be enabled for + subnetwork enabled networks, and all off-cluster + dependencies must be configured to be accessible without + external IP addresses. + service_account (str): + Optional. The `Dataproc service + account `__ + (also see `VM Data Plane + identity `__) + used by Dataproc cluster VM instances to access Google Cloud + Platform services. + + If not specified, the `Compute Engine default service + account `__ + is used. + service_account_scopes (Sequence[str]): + Optional. The URIs of service account scopes to be included + in Compute Engine instances. The following base set of + scopes is always included: + + - https://www.googleapis.com/auth/cloud.useraccounts.readonly + - https://www.googleapis.com/auth/devstorage.read_write + - https://www.googleapis.com/auth/logging.write + + If no scopes are specified, the following defaults are also + provided: + + - https://www.googleapis.com/auth/bigquery + - https://www.googleapis.com/auth/bigtable.admin.table + - https://www.googleapis.com/auth/bigtable.data + - https://www.googleapis.com/auth/devstorage.full_control + tags (Sequence[str]): + The Compute Engine tags to add to all instances (see + `Tagging + instances `__). + metadata (Sequence[~.gcd_clusters.GceClusterConfig.MetadataEntry]): + The Compute Engine metadata entries to add to all instances + (see `Project and instance + metadata `__). + reservation_affinity (~.gcd_clusters.ReservationAffinity): + Optional. Reservation Affinity for consuming + Zonal reservation. + """ + + zone_uri = proto.Field(proto.STRING, number=1) + + network_uri = proto.Field(proto.STRING, number=2) + + subnetwork_uri = proto.Field(proto.STRING, number=6) + + internal_ip_only = proto.Field(proto.BOOL, number=7) + + service_account = proto.Field(proto.STRING, number=8) + + service_account_scopes = proto.RepeatedField(proto.STRING, number=3) + + tags = proto.RepeatedField(proto.STRING, number=4) + + metadata = proto.MapField(proto.STRING, proto.STRING, number=5) + + reservation_affinity = proto.Field( + proto.MESSAGE, number=11, message="ReservationAffinity", + ) + + +class InstanceGroupConfig(proto.Message): + r"""The config settings for Compute Engine resources in + an instance group, such as a master or worker group. + + Attributes: + num_instances (int): + Optional. 
The number of VM instances in the + instance group. For master instance groups, must + be set to 1. + instance_names (Sequence[str]): + Output only. The list of instance names. Dataproc derives + the names from ``cluster_name``, ``num_instances``, and the + instance group. + image_uri (str): + Optional. The Compute Engine image resource used for cluster + instances. + + The URI can represent an image or image family. + + Image examples: + + - ``https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]`` + - ``projects/[project_id]/global/images/[image-id]`` + - ``image-id`` + + Image family examples. Dataproc will use the most recent + image from the family: + + - ``https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]`` + - ``projects/[project_id]/global/images/family/[custom-image-family-name]`` + + If the URI is unspecified, it will be inferred from + ``SoftwareConfig.image_version`` or the system default. + machine_type_uri (str): + Optional. The Compute Engine machine type used for cluster + instances. + + A full URL, partial URI, or short name are valid. Examples: + + - ``https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`` + - ``projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`` + - ``n1-standard-2`` + + **Auto Zone Exception**: If you are using the Dataproc `Auto + Zone + Placement `__ + feature, you must use the short name of the machine type + resource, for example, ``n1-standard-2``. + disk_config (~.gcd_clusters.DiskConfig): + Optional. Disk option config settings. + is_preemptible (bool): + Output only. Specifies that this instance + group contains preemptible instances. + preemptibility (~.gcd_clusters.InstanceGroupConfig.Preemptibility): + Optional. Specifies the preemptibility of the instance + group. + + The default value for master and worker groups is + ``NON_PREEMPTIBLE``. This default cannot be changed. + + The default value for secondary instances is + ``PREEMPTIBLE``. + managed_group_config (~.gcd_clusters.ManagedGroupConfig): + Output only. The config for Compute Engine + Instance Group Manager that manages this group. + This is only used for preemptible instance + groups. + accelerators (Sequence[~.gcd_clusters.AcceleratorConfig]): + Optional. The Compute Engine accelerator + configuration for these instances. + min_cpu_platform (str): + Optional. Specifies the minimum cpu platform for the + Instance Group. See `Dataproc -> Minimum CPU + Platform `__. + """ + + class Preemptibility(proto.Enum): + r"""Controls the use of [preemptible instances] + (https://cloud.google.com/compute/docs/instances/preemptible) within + the group. 
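+
+           Illustrative sketch (editorial example, not generated
+           documentation): marking a secondary worker group as preemptible
+           with the classes defined in this module. ::
+
+               from google.cloud.dataproc_v1.types import clusters
+
+               # The instance count below is a placeholder value.
+               secondary_workers = clusters.InstanceGroupConfig(
+                   num_instances=2,
+                   preemptibility=clusters.InstanceGroupConfig.Preemptibility.PREEMPTIBLE,
+               )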
+ """ + PREEMPTIBILITY_UNSPECIFIED = 0 + NON_PREEMPTIBLE = 1 + PREEMPTIBLE = 2 + + num_instances = proto.Field(proto.INT32, number=1) + + instance_names = proto.RepeatedField(proto.STRING, number=2) + + image_uri = proto.Field(proto.STRING, number=3) + + machine_type_uri = proto.Field(proto.STRING, number=4) + + disk_config = proto.Field(proto.MESSAGE, number=5, message="DiskConfig",) + + is_preemptible = proto.Field(proto.BOOL, number=6) + + preemptibility = proto.Field(proto.ENUM, number=10, enum=Preemptibility,) + + managed_group_config = proto.Field( + proto.MESSAGE, number=7, message="ManagedGroupConfig", + ) + + accelerators = proto.RepeatedField( + proto.MESSAGE, number=8, message="AcceleratorConfig", + ) + + min_cpu_platform = proto.Field(proto.STRING, number=9) + + +class ManagedGroupConfig(proto.Message): + r"""Specifies the resources used to actively manage an instance + group. + + Attributes: + instance_template_name (str): + Output only. The name of the Instance + Template used for the Managed Instance Group. + instance_group_manager_name (str): + Output only. The name of the Instance Group + Manager for this group. + """ + + instance_template_name = proto.Field(proto.STRING, number=1) + + instance_group_manager_name = proto.Field(proto.STRING, number=2) + + +class AcceleratorConfig(proto.Message): + r"""Specifies the type and number of accelerator cards attached to the + instances of an instance. See `GPUs on Compute + Engine `__. + + Attributes: + accelerator_type_uri (str): + Full URL, partial URI, or short name of the accelerator type + resource to expose to this instance. See `Compute Engine + AcceleratorTypes `__. + + Examples: + + - ``https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`` + - ``projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`` + - ``nvidia-tesla-k80`` + + **Auto Zone Exception**: If you are using the Dataproc `Auto + Zone + Placement `__ + feature, you must use the short name of the accelerator type + resource, for example, ``nvidia-tesla-k80``. + accelerator_count (int): + The number of the accelerator cards of this + type exposed to this instance. + """ + + accelerator_type_uri = proto.Field(proto.STRING, number=1) + + accelerator_count = proto.Field(proto.INT32, number=2) + + +class DiskConfig(proto.Message): + r"""Specifies the config of disk options for a group of VM + instances. + + Attributes: + boot_disk_type (str): + Optional. Type of the boot disk (default is + "pd-standard"). Valid values: "pd-ssd" + (Persistent Disk Solid State Drive) or "pd- + standard" (Persistent Disk Hard Disk Drive). + boot_disk_size_gb (int): + Optional. Size in GB of the boot disk + (default is 500GB). + num_local_ssds (int): + Optional. Number of attached SSDs, from 0 to 4 (default is + 0). If SSDs are not attached, the boot disk is used to store + runtime logs and + `HDFS `__ + data. If one or more SSDs are attached, this runtime bulk + data is spread across them, and the boot disk contains only + basic config and installed binaries. + """ + + boot_disk_type = proto.Field(proto.STRING, number=3) + + boot_disk_size_gb = proto.Field(proto.INT32, number=1) + + num_local_ssds = proto.Field(proto.INT32, number=2) + + +class NodeInitializationAction(proto.Message): + r"""Specifies an executable to run on a fully configured node and + a timeout period for executable completion. + + Attributes: + executable_file (str): + Required. Cloud Storage URI of executable + file. 
+ execution_timeout (~.duration.Duration): + Optional. Amount of time executable has to complete. Default + is 10 minutes (see JSON representation of + `Duration `__). + + Cluster creation fails with an explanatory error message + (the name of the executable that caused the error and the + exceeded timeout period) if the executable is not completed + at end of the timeout period. + """ + + executable_file = proto.Field(proto.STRING, number=1) + + execution_timeout = proto.Field(proto.MESSAGE, number=2, message=duration.Duration,) + + +class ClusterStatus(proto.Message): + r"""The status of a cluster and its instances. + + Attributes: + state (~.gcd_clusters.ClusterStatus.State): + Output only. The cluster's state. + detail (str): + Optional. Output only. Details of cluster's + state. + state_start_time (~.timestamp.Timestamp): + Output only. Time when this state was entered (see JSON + representation of + `Timestamp `__). + substate (~.gcd_clusters.ClusterStatus.Substate): + Output only. Additional state information + that includes status reported by the agent. + """ + + class State(proto.Enum): + r"""The cluster state.""" + UNKNOWN = 0 + CREATING = 1 + RUNNING = 2 + ERROR = 3 + DELETING = 4 + UPDATING = 5 + + class Substate(proto.Enum): + r"""The cluster substate.""" + UNSPECIFIED = 0 + UNHEALTHY = 1 + STALE_STATUS = 2 + + state = proto.Field(proto.ENUM, number=1, enum=State,) + + detail = proto.Field(proto.STRING, number=2) + + state_start_time = proto.Field( + proto.MESSAGE, number=3, message=timestamp.Timestamp, + ) + + substate = proto.Field(proto.ENUM, number=4, enum=Substate,) + + +class SecurityConfig(proto.Message): + r"""Security related configuration, including Kerberos. + + Attributes: + kerberos_config (~.gcd_clusters.KerberosConfig): + Kerberos related configuration. + """ + + kerberos_config = proto.Field(proto.MESSAGE, number=1, message="KerberosConfig",) + + +class KerberosConfig(proto.Message): + r"""Specifies Kerberos related configuration. + + Attributes: + enable_kerberos (bool): + Optional. Flag to indicate whether to + Kerberize the cluster (default: false). Set this + field to true to enable Kerberos on a cluster. + root_principal_password_uri (str): + Required. The Cloud Storage URI of a KMS + encrypted file containing the root principal + password. + kms_key_uri (str): + Required. The uri of the KMS key used to + encrypt various sensitive files. + keystore_uri (str): + Optional. The Cloud Storage URI of the + keystore file used for SSL encryption. If not + provided, Dataproc will provide a self-signed + certificate. + truststore_uri (str): + Optional. The Cloud Storage URI of the + truststore file used for SSL encryption. If not + provided, Dataproc will provide a self-signed + certificate. + keystore_password_uri (str): + Optional. The Cloud Storage URI of a KMS + encrypted file containing the password to the + user provided keystore. For the self-signed + certificate, this password is generated by + Dataproc. + key_password_uri (str): + Optional. The Cloud Storage URI of a KMS + encrypted file containing the password to the + user provided key. For the self-signed + certificate, this password is generated by + Dataproc. + truststore_password_uri (str): + Optional. The Cloud Storage URI of a KMS + encrypted file containing the password to the + user provided truststore. For the self-signed + certificate, this password is generated by + Dataproc. + cross_realm_trust_realm (str): + Optional. 
The remote realm the Dataproc on- + luster KDC will trust, should the user enable + cross realm trust. + cross_realm_trust_kdc (str): + Optional. The KDC (IP or hostname) for the + remote trusted realm in a cross realm trust + relationship. + cross_realm_trust_admin_server (str): + Optional. The admin server (IP or hostname) + for the remote trusted realm in a cross realm + trust relationship. + cross_realm_trust_shared_password_uri (str): + Optional. The Cloud Storage URI of a KMS + encrypted file containing the shared password + between the on-cluster Kerberos realm and the + remote trusted realm, in a cross realm trust + relationship. + kdc_db_key_uri (str): + Optional. The Cloud Storage URI of a KMS + encrypted file containing the master key of the + KDC database. + tgt_lifetime_hours (int): + Optional. The lifetime of the ticket granting + ticket, in hours. If not specified, or user + specifies 0, then default value 10 will be used. + realm (str): + Optional. The name of the on-cluster Kerberos + realm. If not specified, the uppercased domain + of hostnames will be the realm. + """ + + enable_kerberos = proto.Field(proto.BOOL, number=1) + + root_principal_password_uri = proto.Field(proto.STRING, number=2) + + kms_key_uri = proto.Field(proto.STRING, number=3) + + keystore_uri = proto.Field(proto.STRING, number=4) + + truststore_uri = proto.Field(proto.STRING, number=5) + + keystore_password_uri = proto.Field(proto.STRING, number=6) + + key_password_uri = proto.Field(proto.STRING, number=7) + + truststore_password_uri = proto.Field(proto.STRING, number=8) + + cross_realm_trust_realm = proto.Field(proto.STRING, number=9) + + cross_realm_trust_kdc = proto.Field(proto.STRING, number=10) + + cross_realm_trust_admin_server = proto.Field(proto.STRING, number=11) + + cross_realm_trust_shared_password_uri = proto.Field(proto.STRING, number=12) + + kdc_db_key_uri = proto.Field(proto.STRING, number=13) + + tgt_lifetime_hours = proto.Field(proto.INT32, number=14) + + realm = proto.Field(proto.STRING, number=15) + + +class SoftwareConfig(proto.Message): + r"""Specifies the selection and config of software inside the + cluster. + + Attributes: + image_version (str): + Optional. The version of software inside the cluster. It + must be one of the supported `Dataproc + Versions `__, + such as "1.2" (including a subminor version, such as + "1.2.29"), or the `"preview" + version `__. + If unspecified, it defaults to the latest Debian version. + properties (Sequence[~.gcd_clusters.SoftwareConfig.PropertiesEntry]): + Optional. The properties to set on daemon config files. + + Property keys are specified in ``prefix:property`` format, + for example ``core:hadoop.tmp.dir``. The following are + supported prefixes and their mappings: + + - capacity-scheduler: ``capacity-scheduler.xml`` + - core: ``core-site.xml`` + - distcp: ``distcp-default.xml`` + - hdfs: ``hdfs-site.xml`` + - hive: ``hive-site.xml`` + - mapred: ``mapred-site.xml`` + - pig: ``pig.properties`` + - spark: ``spark-defaults.conf`` + - yarn: ``yarn-site.xml`` + + For more information, see `Cluster + properties `__. + optional_components (Sequence[~.shared.Component]): + Optional. The set of components to activate + on the cluster. 
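+
+           Illustrative sketch (editorial example, not generated
+           documentation): building this message from the classes defined in
+           this package. The property value is a placeholder, and
+           ``Component.JUPYTER`` is assumed to be a member of the enum in
+           ``types/shared.py``. ::
+
+               from google.cloud.dataproc_v1.types import clusters, shared
+
+               software_config = clusters.SoftwareConfig(
+                   image_version="1.5",  # placeholder image version
+                   properties={"spark:spark.executor.memory": "4g"},  # placeholder property
+                   optional_components=[shared.Component.JUPYTER],  # assumed enum member
+               )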
+ """ + + image_version = proto.Field(proto.STRING, number=1) + + properties = proto.MapField(proto.STRING, proto.STRING, number=2) + + optional_components = proto.RepeatedField( + proto.ENUM, number=3, enum=shared.Component, + ) + + +class LifecycleConfig(proto.Message): + r"""Specifies the cluster auto-delete schedule configuration. + + Attributes: + idle_delete_ttl (~.duration.Duration): + Optional. The duration to keep the cluster alive while + idling (when no jobs are running). Passing this threshold + will cause the cluster to be deleted. Minimum value is 10 + minutes; maximum value is 14 days (see JSON representation + of + `Duration `__. + auto_delete_time (~.timestamp.Timestamp): + Optional. The time when cluster will be auto-deleted (see + JSON representation of + `Timestamp `__). + auto_delete_ttl (~.duration.Duration): + Optional. The lifetime duration of cluster. The cluster will + be auto-deleted at the end of this period. Minimum value is + 10 minutes; maximum value is 14 days (see JSON + representation of + `Duration `__). + idle_start_time (~.timestamp.Timestamp): + Output only. The time when cluster became idle (most recent + job finished) and became eligible for deletion due to + idleness (see JSON representation of + `Timestamp `__). + """ + + idle_delete_ttl = proto.Field(proto.MESSAGE, number=1, message=duration.Duration,) + + auto_delete_time = proto.Field( + proto.MESSAGE, number=2, oneof="ttl", message=timestamp.Timestamp, + ) + + auto_delete_ttl = proto.Field( + proto.MESSAGE, number=3, oneof="ttl", message=duration.Duration, + ) + + idle_start_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,) + + +class ClusterMetrics(proto.Message): + r"""Contains cluster daemon metrics, such as HDFS and YARN stats. + + **Beta Feature**: This report is available for testing purposes + only. It may be changed before final release. + + Attributes: + hdfs_metrics (Sequence[~.gcd_clusters.ClusterMetrics.HdfsMetricsEntry]): + The HDFS metrics. + yarn_metrics (Sequence[~.gcd_clusters.ClusterMetrics.YarnMetricsEntry]): + The YARN metrics. + """ + + hdfs_metrics = proto.MapField(proto.STRING, proto.INT64, number=1) + + yarn_metrics = proto.MapField(proto.STRING, proto.INT64, number=2) + + +class CreateClusterRequest(proto.Message): + r"""A request to create a cluster. + + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the cluster belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + cluster (~.gcd_clusters.Cluster): + Required. The cluster to create. + request_id (str): + Optional. A unique id used to identify the request. If the + server receives two + [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest] + requests with the same id, then the second request will be + ignored and the first + [google.longrunning.Operation][google.longrunning.Operation] + created and stored in the backend is returned. + + It is recommended to always set this value to a + `UUID `__. + + The id must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). The maximum length is 40 + characters. + """ + + project_id = proto.Field(proto.STRING, number=1) + + region = proto.Field(proto.STRING, number=3) + + cluster = proto.Field(proto.MESSAGE, number=2, message=Cluster,) + + request_id = proto.Field(proto.STRING, number=4) + + +class UpdateClusterRequest(proto.Message): + r"""A request to update a cluster. + + Attributes: + project_id (str): + Required. 
The ID of the Google Cloud Platform + project the cluster belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + cluster_name (str): + Required. The cluster name. + cluster (~.gcd_clusters.Cluster): + Required. The changes to the cluster. + graceful_decommission_timeout (~.duration.Duration): + Optional. Timeout for graceful YARN decomissioning. Graceful + decommissioning allows removing nodes from the cluster + without interrupting jobs in progress. Timeout specifies how + long to wait for jobs in progress to finish before + forcefully removing nodes (and potentially interrupting + jobs). Default timeout is 0 (for forceful decommission), and + the maximum allowed timeout is 1 day. (see JSON + representation of + `Duration `__). + + Only supported on Dataproc image versions 1.2 and higher. + update_mask (~.field_mask.FieldMask): + Required. Specifies the path, relative to ``Cluster``, of + the field to update. For example, to change the number of + workers in a cluster to 5, the ``update_mask`` parameter + would be specified as + ``config.worker_config.num_instances``, and the ``PATCH`` + request body would specify the new value, as follows: + + :: + + { + "config":{ + "workerConfig":{ + "numInstances":"5" + } + } + } + + Similarly, to change the number of preemptible workers in a + cluster to 5, the ``update_mask`` parameter would be + ``config.secondary_worker_config.num_instances``, and the + ``PATCH`` request body would be set as follows: + + :: + + { + "config":{ + "secondaryWorkerConfig":{ + "numInstances":"5" + } + } + } + + Note: Currently, only the following fields can be updated: + + .. raw:: html + + + + + + + + + + + + + + + + + + + + + + + +
+           <table>
+             <tbody>
+             <tr>
+               <td><strong>Mask</strong></td>
+               <td><strong>Purpose</strong></td>
+             </tr>
+             <tr>
+               <td>labels</td>
+               <td>Update labels</td>
+             </tr>
+             <tr>
+               <td>config.worker_config.num_instances</td>
+               <td>Resize primary worker group</td>
+             </tr>
+             <tr>
+               <td>config.secondary_worker_config.num_instances</td>
+               <td>Resize secondary worker group</td>
+             </tr>
+             <tr>
+               <td>config.autoscaling_config.policy_uri</td>
+               <td>Use, stop using, or change autoscaling policies</td>
+             </tr>
+             </tbody>
+           </table>
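+
+           Illustrative sketch (editorial example; the project, region, and
+           cluster names are placeholders): the primary worker resize shown
+           above, expressed with the message classes defined in this module
+           and a standard protobuf ``FieldMask``. ::
+
+               from google.cloud.dataproc_v1.types import clusters
+               from google.protobuf import field_mask_pb2
+
+               request = clusters.UpdateClusterRequest(
+                   project_id="my-project",    # placeholder
+                   region="us-central1",       # placeholder
+                   cluster_name="my-cluster",  # placeholder
+                   cluster=clusters.Cluster(
+                       config=clusters.ClusterConfig(
+                           worker_config=clusters.InstanceGroupConfig(num_instances=5),
+                       ),
+                   ),
+                   update_mask=field_mask_pb2.FieldMask(
+                       paths=["config.worker_config.num_instances"],
+                   ),
+               )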
+ request_id (str): + Optional. A unique id used to identify the request. If the + server receives two + [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest] + requests with the same id, then the second request will be + ignored and the first + [google.longrunning.Operation][google.longrunning.Operation] + created and stored in the backend is returned. + + It is recommended to always set this value to a + `UUID `__. + + The id must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). The maximum length is 40 + characters. + """ + + project_id = proto.Field(proto.STRING, number=1) + + region = proto.Field(proto.STRING, number=5) + + cluster_name = proto.Field(proto.STRING, number=2) + + cluster = proto.Field(proto.MESSAGE, number=3, message=Cluster,) + + graceful_decommission_timeout = proto.Field( + proto.MESSAGE, number=6, message=duration.Duration, + ) + + update_mask = proto.Field(proto.MESSAGE, number=4, message=field_mask.FieldMask,) + + request_id = proto.Field(proto.STRING, number=7) + + +class DeleteClusterRequest(proto.Message): + r"""A request to delete a cluster. + + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the cluster belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + cluster_name (str): + Required. The cluster name. + cluster_uuid (str): + Optional. Specifying the ``cluster_uuid`` means the RPC + should fail (with error NOT_FOUND) if cluster with specified + UUID does not exist. + request_id (str): + Optional. A unique id used to identify the request. If the + server receives two + [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest] + requests with the same id, then the second request will be + ignored and the first + [google.longrunning.Operation][google.longrunning.Operation] + created and stored in the backend is returned. + + It is recommended to always set this value to a + `UUID `__. + + The id must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). The maximum length is 40 + characters. + """ + + project_id = proto.Field(proto.STRING, number=1) + + region = proto.Field(proto.STRING, number=3) + + cluster_name = proto.Field(proto.STRING, number=2) + + cluster_uuid = proto.Field(proto.STRING, number=4) + + request_id = proto.Field(proto.STRING, number=5) + + +class GetClusterRequest(proto.Message): + r"""Request to get the resource representation for a cluster in a + project. + + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the cluster belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + cluster_name (str): + Required. The cluster name. + """ + + project_id = proto.Field(proto.STRING, number=1) + + region = proto.Field(proto.STRING, number=3) + + cluster_name = proto.Field(proto.STRING, number=2) + + +class ListClustersRequest(proto.Message): + r"""A request to list the clusters in a project. + + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the cluster belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + filter (str): + Optional. A filter constraining the clusters to list. + Filters are case-sensitive and have the following syntax: + + field = value [AND [field = value]] ... + + where **field** is one of ``status.state``, ``clusterName``, + or ``labels.[KEY]``, and ``[KEY]`` is a label key. 
**value** + can be ``*`` to match all values. ``status.state`` can be + one of the following: ``ACTIVE``, ``INACTIVE``, + ``CREATING``, ``RUNNING``, ``ERROR``, ``DELETING``, or + ``UPDATING``. ``ACTIVE`` contains the ``CREATING``, + ``UPDATING``, and ``RUNNING`` states. ``INACTIVE`` contains + the ``DELETING`` and ``ERROR`` states. ``clusterName`` is + the name of the cluster provided at creation time. Only the + logical ``AND`` operator is supported; space-separated items + are treated as having an implicit ``AND`` operator. + + Example filter: + + status.state = ACTIVE AND clusterName = mycluster AND + labels.env = staging AND labels.starred = \* + page_size (int): + Optional. The standard List page size. + page_token (str): + Optional. The standard List page token. + """ + + project_id = proto.Field(proto.STRING, number=1) + + region = proto.Field(proto.STRING, number=4) + + filter = proto.Field(proto.STRING, number=5) + + page_size = proto.Field(proto.INT32, number=2) + + page_token = proto.Field(proto.STRING, number=3) + + +class ListClustersResponse(proto.Message): + r"""The list of all clusters in a project. + + Attributes: + clusters (Sequence[~.gcd_clusters.Cluster]): + Output only. The clusters in the project. + next_page_token (str): + Output only. This token is included in the response if there + are more results to fetch. To fetch additional results, + provide this value as the ``page_token`` in a subsequent + ``ListClustersRequest``. + """ + + @property + def raw_page(self): + return self + + clusters = proto.RepeatedField(proto.MESSAGE, number=1, message=Cluster,) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class DiagnoseClusterRequest(proto.Message): + r"""A request to collect cluster diagnostic information. + + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the cluster belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + cluster_name (str): + Required. The cluster name. + """ + + project_id = proto.Field(proto.STRING, number=1) + + region = proto.Field(proto.STRING, number=3) + + cluster_name = proto.Field(proto.STRING, number=2) + + +class DiagnoseClusterResults(proto.Message): + r"""The location of diagnostic output. + + Attributes: + output_uri (str): + Output only. The Cloud Storage URI of the + diagnostic output. The output report is a plain + text file with a summary of collected + diagnostics. + """ + + output_uri = proto.Field(proto.STRING, number=1) + + +class ReservationAffinity(proto.Message): + r"""Reservation Affinity for consuming Zonal reservation. + + Attributes: + consume_reservation_type (~.gcd_clusters.ReservationAffinity.Type): + Optional. Type of reservation to consume + key (str): + Optional. Corresponds to the label key of + reservation resource. + values (Sequence[str]): + Optional. Corresponds to the label values of + reservation resource. + """ + + class Type(proto.Enum): + r"""Indicates whether to consume capacity from an reservation or + not. 
+ """ + TYPE_UNSPECIFIED = 0 + NO_RESERVATION = 1 + ANY_RESERVATION = 2 + SPECIFIC_RESERVATION = 3 + + consume_reservation_type = proto.Field(proto.ENUM, number=1, enum=Type,) + + key = proto.Field(proto.STRING, number=2) + + values = proto.RepeatedField(proto.STRING, number=3) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/dataproc_v1/types/jobs.py b/google/cloud/dataproc_v1/types/jobs.py new file mode 100644 index 00000000..c4456e0b --- /dev/null +++ b/google/cloud/dataproc_v1/types/jobs.py @@ -0,0 +1,1060 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.dataproc.v1", + manifest={ + "LoggingConfig", + "HadoopJob", + "SparkJob", + "PySparkJob", + "QueryList", + "HiveJob", + "SparkSqlJob", + "PigJob", + "SparkRJob", + "PrestoJob", + "JobPlacement", + "JobStatus", + "JobReference", + "YarnApplication", + "Job", + "JobScheduling", + "SubmitJobRequest", + "JobMetadata", + "GetJobRequest", + "ListJobsRequest", + "UpdateJobRequest", + "ListJobsResponse", + "CancelJobRequest", + "DeleteJobRequest", + }, +) + + +class LoggingConfig(proto.Message): + r"""The runtime logging config of the job. + + Attributes: + driver_log_levels (Sequence[~.gcd_jobs.LoggingConfig.DriverLogLevelsEntry]): + The per-package log levels for the driver. + This may include "root" package name to + configure rootLogger. Examples: + 'com.google = FATAL', 'root = INFO', + 'org.apache = DEBUG' + """ + + class Level(proto.Enum): + r"""The Log4j level for job execution. When running an `Apache + Hive `__ job, Cloud Dataproc configures the + Hive client to an equivalent verbosity level. + """ + LEVEL_UNSPECIFIED = 0 + ALL = 1 + TRACE = 2 + DEBUG = 3 + INFO = 4 + WARN = 5 + ERROR = 6 + FATAL = 7 + OFF = 8 + + driver_log_levels = proto.MapField(proto.STRING, proto.ENUM, number=2, enum=Level,) + + +class HadoopJob(proto.Message): + r"""A Dataproc job for running `Apache Hadoop + MapReduce `__ + jobs on `Apache Hadoop + YARN `__. + + Attributes: + main_jar_file_uri (str): + The HCFS URI of the jar file containing the + main class. Examples: + 'gs://foo-bucket/analytics-binaries/extract- + useful-metrics-mr.jar' 'hdfs:/tmp/test- + samples/custom-wordcount.jar' + 'file:///home/usr/lib/hadoop-mapreduce/hadoop- + mapreduce-examples.jar' + main_class (str): + The name of the driver's main class. The jar file containing + the class must be in the default CLASSPATH or specified in + ``jar_file_uris``. + args (Sequence[str]): + Optional. The arguments to pass to the driver. Do not + include arguments, such as ``-libjars`` or ``-Dfoo=bar``, + that can be set as job properties, since a collision may + occur that causes an incorrect job submission. + jar_file_uris (Sequence[str]): + Optional. 
Jar file URIs to add to the + CLASSPATHs of the Hadoop driver and tasks. + file_uris (Sequence[str]): + Optional. HCFS (Hadoop Compatible Filesystem) + URIs of files to be copied to the working + directory of Hadoop drivers and distributed + tasks. Useful for naively parallel tasks. + archive_uris (Sequence[str]): + Optional. HCFS URIs of archives to be + extracted in the working directory of Hadoop + drivers and tasks. Supported file types: .jar, + .tar, .tar.gz, .tgz, or .zip. + properties (Sequence[~.gcd_jobs.HadoopJob.PropertiesEntry]): + Optional. A mapping of property names to values, used to + configure Hadoop. Properties that conflict with values set + by the Dataproc API may be overwritten. Can include + properties set in /etc/hadoop/conf/*-site and classes in + user code. + logging_config (~.gcd_jobs.LoggingConfig): + Optional. The runtime log config for job + execution. + """ + + main_jar_file_uri = proto.Field(proto.STRING, number=1, oneof="driver") + + main_class = proto.Field(proto.STRING, number=2, oneof="driver") + + args = proto.RepeatedField(proto.STRING, number=3) + + jar_file_uris = proto.RepeatedField(proto.STRING, number=4) + + file_uris = proto.RepeatedField(proto.STRING, number=5) + + archive_uris = proto.RepeatedField(proto.STRING, number=6) + + properties = proto.MapField(proto.STRING, proto.STRING, number=7) + + logging_config = proto.Field(proto.MESSAGE, number=8, message=LoggingConfig,) + + +class SparkJob(proto.Message): + r"""A Dataproc job for running `Apache + Spark `__ applications on YARN. + + Attributes: + main_jar_file_uri (str): + The HCFS URI of the jar file that contains + the main class. + main_class (str): + The name of the driver's main class. The jar file that + contains the class must be in the default CLASSPATH or + specified in ``jar_file_uris``. + args (Sequence[str]): + Optional. The arguments to pass to the driver. Do not + include arguments, such as ``--conf``, that can be set as + job properties, since a collision may occur that causes an + incorrect job submission. + jar_file_uris (Sequence[str]): + Optional. HCFS URIs of jar files to add to + the CLASSPATHs of the Spark driver and tasks. + file_uris (Sequence[str]): + Optional. HCFS URIs of files to be placed in + the working directory of each executor. Useful + for naively parallel tasks. + archive_uris (Sequence[str]): + Optional. HCFS URIs of archives to be + extracted into the working directory of each + executor. Supported file types: .jar, .tar, + .tar.gz, .tgz, and .zip. + properties (Sequence[~.gcd_jobs.SparkJob.PropertiesEntry]): + Optional. A mapping of property names to + values, used to configure Spark. Properties that + conflict with values set by the Dataproc API may + be overwritten. Can include properties set in + /etc/spark/conf/spark-defaults.conf and classes + in user code. + logging_config (~.gcd_jobs.LoggingConfig): + Optional. The runtime log config for job + execution. 
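+
+           Illustrative sketch (editorial example; the class, bucket, and
+           cluster names are placeholders): building this message and
+           attaching it to a ``Job`` with the classes defined in this
+           module. A ``SubmitJobRequest`` (defined later in this module)
+           would wrap the job together with ``project_id`` and ``region``. ::
+
+               from google.cloud.dataproc_v1.types import jobs
+
+               spark_job = jobs.SparkJob(
+                   main_class="org.example.WordCount",              # placeholder
+                   jar_file_uris=["gs://my-bucket/wordcount.jar"],  # placeholder
+                   args=["gs://my-bucket/input/", "gs://my-bucket/output/"],  # placeholders
+               )
+               job = jobs.Job(
+                   placement=jobs.JobPlacement(cluster_name="my-cluster"),  # placeholder
+                   spark_job=spark_job,
+               )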
+ """ + + main_jar_file_uri = proto.Field(proto.STRING, number=1, oneof="driver") + + main_class = proto.Field(proto.STRING, number=2, oneof="driver") + + args = proto.RepeatedField(proto.STRING, number=3) + + jar_file_uris = proto.RepeatedField(proto.STRING, number=4) + + file_uris = proto.RepeatedField(proto.STRING, number=5) + + archive_uris = proto.RepeatedField(proto.STRING, number=6) + + properties = proto.MapField(proto.STRING, proto.STRING, number=7) + + logging_config = proto.Field(proto.MESSAGE, number=8, message=LoggingConfig,) + + +class PySparkJob(proto.Message): + r"""A Dataproc job for running `Apache + PySpark `__ + applications on YARN. + + Attributes: + main_python_file_uri (str): + Required. The HCFS URI of the main Python + file to use as the driver. Must be a .py file. + args (Sequence[str]): + Optional. The arguments to pass to the driver. Do not + include arguments, such as ``--conf``, that can be set as + job properties, since a collision may occur that causes an + incorrect job submission. + python_file_uris (Sequence[str]): + Optional. HCFS file URIs of Python files to + pass to the PySpark framework. Supported file + types: .py, .egg, and .zip. + jar_file_uris (Sequence[str]): + Optional. HCFS URIs of jar files to add to + the CLASSPATHs of the Python driver and tasks. + file_uris (Sequence[str]): + Optional. HCFS URIs of files to be placed in + the working directory of each executor. Useful + for naively parallel tasks. + archive_uris (Sequence[str]): + Optional. HCFS URIs of archives to be + extracted into the working directory of each + executor. Supported file types: .jar, .tar, + .tar.gz, .tgz, and .zip. + properties (Sequence[~.gcd_jobs.PySparkJob.PropertiesEntry]): + Optional. A mapping of property names to + values, used to configure PySpark. Properties + that conflict with values set by the Dataproc + API may be overwritten. Can include properties + set in + /etc/spark/conf/spark-defaults.conf and classes + in user code. + logging_config (~.gcd_jobs.LoggingConfig): + Optional. The runtime log config for job + execution. + """ + + main_python_file_uri = proto.Field(proto.STRING, number=1) + + args = proto.RepeatedField(proto.STRING, number=2) + + python_file_uris = proto.RepeatedField(proto.STRING, number=3) + + jar_file_uris = proto.RepeatedField(proto.STRING, number=4) + + file_uris = proto.RepeatedField(proto.STRING, number=5) + + archive_uris = proto.RepeatedField(proto.STRING, number=6) + + properties = proto.MapField(proto.STRING, proto.STRING, number=7) + + logging_config = proto.Field(proto.MESSAGE, number=8, message=LoggingConfig,) + + +class QueryList(proto.Message): + r"""A list of queries to run on a cluster. + + Attributes: + queries (Sequence[str]): + Required. The queries to execute. You do not need to + terminate a query with a semicolon. Multiple queries can be + specified in one string by separating each with a semicolon. + Here is an example of an Cloud Dataproc API snippet that + uses a QueryList to specify a HiveJob: + + :: + + "hiveJob": { + "queryList": { + "queries": [ + "query1", + "query2", + "query3;query4", + ] + } + } + """ + + queries = proto.RepeatedField(proto.STRING, number=1) + + +class HiveJob(proto.Message): + r"""A Dataproc job for running `Apache + Hive `__ queries on YARN. + + Attributes: + query_file_uri (str): + The HCFS URI of the script that contains Hive + queries. + query_list (~.gcd_jobs.QueryList): + A list of queries. + continue_on_failure (bool): + Optional. 
Whether to continue executing queries if a query + fails. The default value is ``false``. Setting to ``true`` + can be useful when executing independent parallel queries. + script_variables (Sequence[~.gcd_jobs.HiveJob.ScriptVariablesEntry]): + Optional. Mapping of query variable names to values + (equivalent to the Hive command: ``SET name="value";``). + properties (Sequence[~.gcd_jobs.HiveJob.PropertiesEntry]): + Optional. A mapping of property names and values, used to + configure Hive. Properties that conflict with values set by + the Dataproc API may be overwritten. Can include properties + set in /etc/hadoop/conf/*-site.xml, + /etc/hive/conf/hive-site.xml, and classes in user code. + jar_file_uris (Sequence[str]): + Optional. HCFS URIs of jar files to add to + the CLASSPATH of the Hive server and Hadoop + MapReduce (MR) tasks. Can contain Hive SerDes + and UDFs. + """ + + query_file_uri = proto.Field(proto.STRING, number=1, oneof="queries") + + query_list = proto.Field( + proto.MESSAGE, number=2, oneof="queries", message=QueryList, + ) + + continue_on_failure = proto.Field(proto.BOOL, number=3) + + script_variables = proto.MapField(proto.STRING, proto.STRING, number=4) + + properties = proto.MapField(proto.STRING, proto.STRING, number=5) + + jar_file_uris = proto.RepeatedField(proto.STRING, number=6) + + +class SparkSqlJob(proto.Message): + r"""A Dataproc job for running `Apache Spark + SQL `__ queries. + + Attributes: + query_file_uri (str): + The HCFS URI of the script that contains SQL + queries. + query_list (~.gcd_jobs.QueryList): + A list of queries. + script_variables (Sequence[~.gcd_jobs.SparkSqlJob.ScriptVariablesEntry]): + Optional. Mapping of query variable names to values + (equivalent to the Spark SQL command: SET + ``name="value";``). + properties (Sequence[~.gcd_jobs.SparkSqlJob.PropertiesEntry]): + Optional. A mapping of property names to + values, used to configure Spark SQL's SparkConf. + Properties that conflict with values set by the + Dataproc API may be overwritten. + jar_file_uris (Sequence[str]): + Optional. HCFS URIs of jar files to be added + to the Spark CLASSPATH. + logging_config (~.gcd_jobs.LoggingConfig): + Optional. The runtime log config for job + execution. + """ + + query_file_uri = proto.Field(proto.STRING, number=1, oneof="queries") + + query_list = proto.Field( + proto.MESSAGE, number=2, oneof="queries", message=QueryList, + ) + + script_variables = proto.MapField(proto.STRING, proto.STRING, number=3) + + properties = proto.MapField(proto.STRING, proto.STRING, number=4) + + jar_file_uris = proto.RepeatedField(proto.STRING, number=56) + + logging_config = proto.Field(proto.MESSAGE, number=6, message=LoggingConfig,) + + +class PigJob(proto.Message): + r"""A Dataproc job for running `Apache Pig `__ + queries on YARN. + + Attributes: + query_file_uri (str): + The HCFS URI of the script that contains the + Pig queries. + query_list (~.gcd_jobs.QueryList): + A list of queries. + continue_on_failure (bool): + Optional. Whether to continue executing queries if a query + fails. The default value is ``false``. Setting to ``true`` + can be useful when executing independent parallel queries. + script_variables (Sequence[~.gcd_jobs.PigJob.ScriptVariablesEntry]): + Optional. Mapping of query variable names to values + (equivalent to the Pig command: ``name=[value]``). + properties (Sequence[~.gcd_jobs.PigJob.PropertiesEntry]): + Optional. A mapping of property names to values, used to + configure Pig. 
Properties that conflict with values set by + the Dataproc API may be overwritten. Can include properties + set in /etc/hadoop/conf/*-site.xml, + /etc/pig/conf/pig.properties, and classes in user code. + jar_file_uris (Sequence[str]): + Optional. HCFS URIs of jar files to add to + the CLASSPATH of the Pig Client and Hadoop + MapReduce (MR) tasks. Can contain Pig UDFs. + logging_config (~.gcd_jobs.LoggingConfig): + Optional. The runtime log config for job + execution. + """ + + query_file_uri = proto.Field(proto.STRING, number=1, oneof="queries") + + query_list = proto.Field( + proto.MESSAGE, number=2, oneof="queries", message=QueryList, + ) + + continue_on_failure = proto.Field(proto.BOOL, number=3) + + script_variables = proto.MapField(proto.STRING, proto.STRING, number=4) + + properties = proto.MapField(proto.STRING, proto.STRING, number=5) + + jar_file_uris = proto.RepeatedField(proto.STRING, number=6) + + logging_config = proto.Field(proto.MESSAGE, number=7, message=LoggingConfig,) + + +class SparkRJob(proto.Message): + r"""A Dataproc job for running `Apache + SparkR `__ + applications on YARN. + + Attributes: + main_r_file_uri (str): + Required. The HCFS URI of the main R file to + use as the driver. Must be a .R file. + args (Sequence[str]): + Optional. The arguments to pass to the driver. Do not + include arguments, such as ``--conf``, that can be set as + job properties, since a collision may occur that causes an + incorrect job submission. + file_uris (Sequence[str]): + Optional. HCFS URIs of files to be placed in + the working directory of each executor. Useful + for naively parallel tasks. + archive_uris (Sequence[str]): + Optional. HCFS URIs of archives to be + extracted into the working directory of each + executor. Supported file types: .jar, .tar, + .tar.gz, .tgz, and .zip. + properties (Sequence[~.gcd_jobs.SparkRJob.PropertiesEntry]): + Optional. A mapping of property names to + values, used to configure SparkR. Properties + that conflict with values set by the Dataproc + API may be overwritten. Can include properties + set in + /etc/spark/conf/spark-defaults.conf and classes + in user code. + logging_config (~.gcd_jobs.LoggingConfig): + Optional. The runtime log config for job + execution. + """ + + main_r_file_uri = proto.Field(proto.STRING, number=1) + + args = proto.RepeatedField(proto.STRING, number=2) + + file_uris = proto.RepeatedField(proto.STRING, number=3) + + archive_uris = proto.RepeatedField(proto.STRING, number=4) + + properties = proto.MapField(proto.STRING, proto.STRING, number=5) + + logging_config = proto.Field(proto.MESSAGE, number=6, message=LoggingConfig,) + + +class PrestoJob(proto.Message): + r"""A Dataproc job for running `Presto `__ + queries. **IMPORTANT**: The `Dataproc Presto Optional + Component `__ + must be enabled when the cluster is created to submit a Presto job + to the cluster. + + Attributes: + query_file_uri (str): + The HCFS URI of the script that contains SQL + queries. + query_list (~.gcd_jobs.QueryList): + A list of queries. + continue_on_failure (bool): + Optional. Whether to continue executing queries if a query + fails. The default value is ``false``. Setting to ``true`` + can be useful when executing independent parallel queries. + output_format (str): + Optional. The format in which query output + will be displayed. See the Presto documentation + for supported output formats + client_tags (Sequence[str]): + Optional. 
Presto client tags to attach to + this query + properties (Sequence[~.gcd_jobs.PrestoJob.PropertiesEntry]): + Optional. A mapping of property names to values. Used to set + Presto `session + properties `__ + Equivalent to using the --session flag in the Presto CLI + logging_config (~.gcd_jobs.LoggingConfig): + Optional. The runtime log config for job + execution. + """ + + query_file_uri = proto.Field(proto.STRING, number=1, oneof="queries") + + query_list = proto.Field( + proto.MESSAGE, number=2, oneof="queries", message=QueryList, + ) + + continue_on_failure = proto.Field(proto.BOOL, number=3) + + output_format = proto.Field(proto.STRING, number=4) + + client_tags = proto.RepeatedField(proto.STRING, number=5) + + properties = proto.MapField(proto.STRING, proto.STRING, number=6) + + logging_config = proto.Field(proto.MESSAGE, number=7, message=LoggingConfig,) + + +class JobPlacement(proto.Message): + r"""Dataproc job config. + + Attributes: + cluster_name (str): + Required. The name of the cluster where the + job will be submitted. + cluster_uuid (str): + Output only. A cluster UUID generated by the + Dataproc service when the job is submitted. + """ + + cluster_name = proto.Field(proto.STRING, number=1) + + cluster_uuid = proto.Field(proto.STRING, number=2) + + +class JobStatus(proto.Message): + r"""Dataproc job status. + + Attributes: + state (~.gcd_jobs.JobStatus.State): + Output only. A state message specifying the + overall job state. + details (str): + Optional. Output only. Job state details, + such as an error description if the state is + ERROR. + state_start_time (~.timestamp.Timestamp): + Output only. The time when this state was + entered. + substate (~.gcd_jobs.JobStatus.Substate): + Output only. Additional state information, + which includes status reported by the agent. + """ + + class State(proto.Enum): + r"""The job state.""" + STATE_UNSPECIFIED = 0 + PENDING = 1 + SETUP_DONE = 8 + RUNNING = 2 + CANCEL_PENDING = 3 + CANCEL_STARTED = 7 + CANCELLED = 4 + DONE = 5 + ERROR = 6 + ATTEMPT_FAILURE = 9 + + class Substate(proto.Enum): + r"""The job substate.""" + UNSPECIFIED = 0 + SUBMITTED = 1 + QUEUED = 2 + STALE_STATUS = 3 + + state = proto.Field(proto.ENUM, number=1, enum=State,) + + details = proto.Field(proto.STRING, number=2) + + state_start_time = proto.Field( + proto.MESSAGE, number=6, message=timestamp.Timestamp, + ) + + substate = proto.Field(proto.ENUM, number=7, enum=Substate,) + + +class JobReference(proto.Message): + r"""Encapsulates the full scoping used to reference a job. + + Attributes: + project_id (str): + Optional. The ID of the Google Cloud Platform + project that the job belongs to. If specified, + must match the request project ID. + job_id (str): + Optional. The job ID, which must be unique within the + project. + + The ID must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), or hyphens (-). The maximum length is 100 + characters. + + If not specified by the caller, the job ID will be provided + by the server. + """ + + project_id = proto.Field(proto.STRING, number=1) + + job_id = proto.Field(proto.STRING, number=2) + + +class YarnApplication(proto.Message): + r"""A YARN application created by a job. Application information is a + subset of + org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. + + **Beta Feature**: This report is available for testing purposes + only. It may be changed before final release. + + Attributes: + name (str): + Required. The application name. + state (~.gcd_jobs.YarnApplication.State): + Required. 
The application state. + progress (float): + Required. The numerical progress of the + application, from 1 to 100. + tracking_url (str): + Optional. The HTTP URL of the + ApplicationMaster, HistoryServer, or + TimelineServer that provides application- + specific information. The URL uses the internal + hostname, and requires a proxy server for + resolution and, possibly, access. + """ + + class State(proto.Enum): + r"""The application state, corresponding to + YarnProtos.YarnApplicationStateProto. + """ + STATE_UNSPECIFIED = 0 + NEW = 1 + NEW_SAVING = 2 + SUBMITTED = 3 + ACCEPTED = 4 + RUNNING = 5 + FINISHED = 6 + FAILED = 7 + KILLED = 8 + + name = proto.Field(proto.STRING, number=1) + + state = proto.Field(proto.ENUM, number=2, enum=State,) + + progress = proto.Field(proto.FLOAT, number=3) + + tracking_url = proto.Field(proto.STRING, number=4) + + +class Job(proto.Message): + r"""A Dataproc job resource. + + Attributes: + reference (~.gcd_jobs.JobReference): + Optional. The fully qualified reference to the job, which + can be used to obtain the equivalent REST path of the job + resource. If this property is not specified when a job is + created, the server generates a job_id. + placement (~.gcd_jobs.JobPlacement): + Required. Job information, including how, + when, and where to run the job. + hadoop_job (~.gcd_jobs.HadoopJob): + Optional. Job is a Hadoop job. + spark_job (~.gcd_jobs.SparkJob): + Optional. Job is a Spark job. + pyspark_job (~.gcd_jobs.PySparkJob): + Optional. Job is a PySpark job. + hive_job (~.gcd_jobs.HiveJob): + Optional. Job is a Hive job. + pig_job (~.gcd_jobs.PigJob): + Optional. Job is a Pig job. + spark_r_job (~.gcd_jobs.SparkRJob): + Optional. Job is a SparkR job. + spark_sql_job (~.gcd_jobs.SparkSqlJob): + Optional. Job is a SparkSql job. + presto_job (~.gcd_jobs.PrestoJob): + Optional. Job is a Presto job. + status (~.gcd_jobs.JobStatus): + Output only. The job status. Additional application-specific + status information may be contained in the type_job and + yarn_applications fields. + status_history (Sequence[~.gcd_jobs.JobStatus]): + Output only. The previous job status. + yarn_applications (Sequence[~.gcd_jobs.YarnApplication]): + Output only. The collection of YARN applications spun up by + this job. + + **Beta** Feature: This report is available for testing + purposes only. It may be changed before final release. + driver_output_resource_uri (str): + Output only. A URI pointing to the location + of the stdout of the job's driver program. + driver_control_files_uri (str): + Output only. If present, the location of miscellaneous + control files which may be used as part of job setup and + handling. If not present, control files may be placed in the + same location as ``driver_output_uri``. + labels (Sequence[~.gcd_jobs.Job.LabelsEntry]): + Optional. The labels to associate with this job. Label + **keys** must contain 1 to 63 characters, and must conform + to `RFC 1035 `__. + Label **values** may be empty, but, if present, must contain + 1 to 63 characters, and must conform to `RFC + 1035 `__. No more than + 32 labels can be associated with a job. + scheduling (~.gcd_jobs.JobScheduling): + Optional. Job scheduling configuration. + job_uuid (str): + Output only. A UUID that uniquely identifies a job within + the project over time. This is in contrast to a + user-settable reference.job_id that may be reused over time. + done (bool): + Output only. Indicates whether the job is completed. If the + value is ``false``, the job is still in progress. 
If + ``true``, the job is completed, and ``status.state`` field + will indicate if it was successful, failed, or cancelled. + """ + + reference = proto.Field(proto.MESSAGE, number=1, message=JobReference,) + + placement = proto.Field(proto.MESSAGE, number=2, message=JobPlacement,) + + hadoop_job = proto.Field( + proto.MESSAGE, number=3, oneof="type_job", message=HadoopJob, + ) + + spark_job = proto.Field( + proto.MESSAGE, number=4, oneof="type_job", message=SparkJob, + ) + + pyspark_job = proto.Field( + proto.MESSAGE, number=5, oneof="type_job", message=PySparkJob, + ) + + hive_job = proto.Field(proto.MESSAGE, number=6, oneof="type_job", message=HiveJob,) + + pig_job = proto.Field(proto.MESSAGE, number=7, oneof="type_job", message=PigJob,) + + spark_r_job = proto.Field( + proto.MESSAGE, number=21, oneof="type_job", message=SparkRJob, + ) + + spark_sql_job = proto.Field( + proto.MESSAGE, number=12, oneof="type_job", message=SparkSqlJob, + ) + + presto_job = proto.Field( + proto.MESSAGE, number=23, oneof="type_job", message=PrestoJob, + ) + + status = proto.Field(proto.MESSAGE, number=8, message=JobStatus,) + + status_history = proto.RepeatedField(proto.MESSAGE, number=13, message=JobStatus,) + + yarn_applications = proto.RepeatedField( + proto.MESSAGE, number=9, message=YarnApplication, + ) + + driver_output_resource_uri = proto.Field(proto.STRING, number=17) + + driver_control_files_uri = proto.Field(proto.STRING, number=15) + + labels = proto.MapField(proto.STRING, proto.STRING, number=18) + + scheduling = proto.Field(proto.MESSAGE, number=20, message="JobScheduling",) + + job_uuid = proto.Field(proto.STRING, number=22) + + done = proto.Field(proto.BOOL, number=24) + + +class JobScheduling(proto.Message): + r"""Job scheduling options. + + Attributes: + max_failures_per_hour (int): + Optional. Maximum number of times per hour a + driver may be restarted as a result of driver + terminating with non-zero code before job is + reported failed. + + A job may be reported as thrashing if driver + exits with non-zero code 4 times within 10 + minute window. + + Maximum value is 10. + """ + + max_failures_per_hour = proto.Field(proto.INT32, number=1) + + +class SubmitJobRequest(proto.Message): + r"""A request to submit a job. + + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the job belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + job (~.gcd_jobs.Job): + Required. The job resource. + request_id (str): + Optional. A unique id used to identify the request. If the + server receives two + [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest] + requests with the same id, then the second request will be + ignored and the first [Job][google.cloud.dataproc.v1.Job] + created and stored in the backend is returned. + + It is recommended to always set this value to a + `UUID `__. + + The id must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). The maximum length is 40 + characters. + """ + + project_id = proto.Field(proto.STRING, number=1) + + region = proto.Field(proto.STRING, number=3) + + job = proto.Field(proto.MESSAGE, number=2, message=Job,) + + request_id = proto.Field(proto.STRING, number=4) + + +class JobMetadata(proto.Message): + r"""Job Operation metadata. + + Attributes: + job_id (str): + Output only. The job id. + status (~.gcd_jobs.JobStatus): + Output only. Most recent job status. + operation_type (str): + Output only. Operation type. 
+ start_time (~.timestamp.Timestamp): + Output only. Job submission time. + """ + + job_id = proto.Field(proto.STRING, number=1) + + status = proto.Field(proto.MESSAGE, number=2, message=JobStatus,) + + operation_type = proto.Field(proto.STRING, number=3) + + start_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,) + + +class GetJobRequest(proto.Message): + r"""A request to get the resource representation for a job in a + project. + + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the job belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + job_id (str): + Required. The job ID. + """ + + project_id = proto.Field(proto.STRING, number=1) + + region = proto.Field(proto.STRING, number=3) + + job_id = proto.Field(proto.STRING, number=2) + + +class ListJobsRequest(proto.Message): + r"""A request to list jobs in a project. + + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the job belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + page_size (int): + Optional. The number of results to return in + each response. + page_token (str): + Optional. The page token, returned by a + previous call, to request the next page of + results. + cluster_name (str): + Optional. If set, the returned jobs list + includes only jobs that were submitted to the + named cluster. + job_state_matcher (~.gcd_jobs.ListJobsRequest.JobStateMatcher): + Optional. Specifies enumerated categories of jobs to list. + (default = match ALL jobs). + + If ``filter`` is provided, ``jobStateMatcher`` will be + ignored. + filter (str): + Optional. A filter constraining the jobs to list. Filters + are case-sensitive and have the following syntax: + + [field = value] AND [field [= value]] ... + + where **field** is ``status.state`` or ``labels.[KEY]``, and + ``[KEY]`` is a label key. **value** can be ``*`` to match + all values. ``status.state`` can be either ``ACTIVE`` or + ``NON_ACTIVE``. Only the logical ``AND`` operator is + supported; space-separated items are treated as having an + implicit ``AND`` operator. + + Example filter: + + status.state = ACTIVE AND labels.env = staging AND + labels.starred = \* + """ + + class JobStateMatcher(proto.Enum): + r"""A matcher that specifies categories of job states.""" + ALL = 0 + ACTIVE = 1 + NON_ACTIVE = 2 + + project_id = proto.Field(proto.STRING, number=1) + + region = proto.Field(proto.STRING, number=6) + + page_size = proto.Field(proto.INT32, number=2) + + page_token = proto.Field(proto.STRING, number=3) + + cluster_name = proto.Field(proto.STRING, number=4) + + job_state_matcher = proto.Field(proto.ENUM, number=5, enum=JobStateMatcher,) + + filter = proto.Field(proto.STRING, number=7) + + +class UpdateJobRequest(proto.Message): + r"""A request to update a job. + + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the job belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + job_id (str): + Required. The job ID. + job (~.gcd_jobs.Job): + Required. The changes to the job. + update_mask (~.field_mask.FieldMask): + Required. Specifies the path, relative to Job, of the field + to update. For example, to update the labels of a Job the + update_mask parameter would be specified as labels, and the + ``PATCH`` request body would specify the new value. 
Note: + Currently, labels is the only field that can be updated. + """ + + project_id = proto.Field(proto.STRING, number=1) + + region = proto.Field(proto.STRING, number=2) + + job_id = proto.Field(proto.STRING, number=3) + + job = proto.Field(proto.MESSAGE, number=4, message=Job,) + + update_mask = proto.Field(proto.MESSAGE, number=5, message=field_mask.FieldMask,) + + +class ListJobsResponse(proto.Message): + r"""A list of jobs in a project. + + Attributes: + jobs (Sequence[~.gcd_jobs.Job]): + Output only. Jobs list. + next_page_token (str): + Optional. This token is included in the response if there + are more results to fetch. To fetch additional results, + provide this value as the ``page_token`` in a subsequent + ListJobsRequest. + """ + + @property + def raw_page(self): + return self + + jobs = proto.RepeatedField(proto.MESSAGE, number=1, message=Job,) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class CancelJobRequest(proto.Message): + r"""A request to cancel a job. + + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the job belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + job_id (str): + Required. The job ID. + """ + + project_id = proto.Field(proto.STRING, number=1) + + region = proto.Field(proto.STRING, number=3) + + job_id = proto.Field(proto.STRING, number=2) + + +class DeleteJobRequest(proto.Message): + r"""A request to delete a job. + + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the job belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + job_id (str): + Required. The job ID. + """ + + project_id = proto.Field(proto.STRING, number=1) + + region = proto.Field(proto.STRING, number=3) + + job_id = proto.Field(proto.STRING, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/dataproc_v1/types/operations.py b/google/cloud/dataproc_v1/types/operations.py new file mode 100644 index 00000000..e059814a --- /dev/null +++ b/google/cloud/dataproc_v1/types/operations.py @@ -0,0 +1,109 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.dataproc.v1", + manifest={"ClusterOperationStatus", "ClusterOperationMetadata",}, +) + + +class ClusterOperationStatus(proto.Message): + r"""The status of the operation. + + Attributes: + state (~.operations.ClusterOperationStatus.State): + Output only. A message containing the + operation state. + inner_state (str): + Output only. A message containing the + detailed operation state. + details (str): + Output only. A message containing any + operation metadata details. + state_start_time (~.timestamp.Timestamp): + Output only. The time this state was entered. 
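As a hedged illustration of how the jobs.py messages added above fit together (a minimal sketch, not part of the generated code in this change: the package-level JobControllerClient export, the request= keyword, and every project, region, cluster, and bucket name are assumptions or placeholders), submitting a PySpark job might look like:

    >>> import uuid
    >>> from google.cloud import dataproc_v1
    >>>
    >>> client = dataproc_v1.JobControllerClient()
    >>>
    >>> job = dataproc_v1.Job(
    ...     # placement plus exactly one type_job field (here pyspark_job);
    ...     # the type_job fields form a oneof, so setting one clears the others.
    ...     placement=dataproc_v1.JobPlacement(cluster_name="my-cluster"),
    ...     pyspark_job=dataproc_v1.PySparkJob(
    ...         main_python_file_uri="gs://my-bucket/word_count.py",
    ...     ),
    ... )
    >>> request = dataproc_v1.SubmitJobRequest(
    ...     project_id="my-project",
    ...     region="us-central1",
    ...     job=job,
    ...     request_id=str(uuid.uuid4()),  # optional; lets a retried request be deduplicated
    ... )
    >>> response = client.submit_job(request=request)
    >>>
    >>> # The ListJobsRequest filter syntax documented above, for comparison:
    >>> list_request = dataproc_v1.ListJobsRequest(
    ...     project_id="my-project",
    ...     region="us-central1",
    ...     filter="status.state = ACTIVE AND labels.env = staging",
    ... )

Per the SubmitJobRequest documentation above, a second request carrying the same request_id returns the originally created Job rather than creating a duplicate.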
+ """ + + class State(proto.Enum): + r"""The operation state.""" + UNKNOWN = 0 + PENDING = 1 + RUNNING = 2 + DONE = 3 + + state = proto.Field(proto.ENUM, number=1, enum=State,) + + inner_state = proto.Field(proto.STRING, number=2) + + details = proto.Field(proto.STRING, number=3) + + state_start_time = proto.Field( + proto.MESSAGE, number=4, message=timestamp.Timestamp, + ) + + +class ClusterOperationMetadata(proto.Message): + r"""Metadata describing the operation. + + Attributes: + cluster_name (str): + Output only. Name of the cluster for the + operation. + cluster_uuid (str): + Output only. Cluster UUID for the operation. + status (~.operations.ClusterOperationStatus): + Output only. Current operation status. + status_history (Sequence[~.operations.ClusterOperationStatus]): + Output only. The previous operation status. + operation_type (str): + Output only. The operation type. + description (str): + Output only. Short description of operation. + labels (Sequence[~.operations.ClusterOperationMetadata.LabelsEntry]): + Output only. Labels associated with the + operation + warnings (Sequence[str]): + Output only. Errors encountered during + operation execution. + """ + + cluster_name = proto.Field(proto.STRING, number=7) + + cluster_uuid = proto.Field(proto.STRING, number=8) + + status = proto.Field(proto.MESSAGE, number=9, message=ClusterOperationStatus,) + + status_history = proto.RepeatedField( + proto.MESSAGE, number=10, message=ClusterOperationStatus, + ) + + operation_type = proto.Field(proto.STRING, number=11) + + description = proto.Field(proto.STRING, number=12) + + labels = proto.MapField(proto.STRING, proto.STRING, number=13) + + warnings = proto.RepeatedField(proto.STRING, number=14) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/dataproc_v1/types/shared.py b/google/cloud/dataproc_v1/types/shared.py new file mode 100644 index 00000000..df28f11e --- /dev/null +++ b/google/cloud/dataproc_v1/types/shared.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.dataproc.v1", manifest={"Component",}, +) + + +class Component(proto.Enum): + r"""Cluster components that can be activated.""" + COMPONENT_UNSPECIFIED = 0 + ANACONDA = 5 + HIVE_WEBHCAT = 3 + JUPYTER = 1 + PRESTO = 6 + ZEPPELIN = 4 + ZOOKEEPER = 8 + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/dataproc_v1/types/workflow_templates.py b/google/cloud/dataproc_v1/types/workflow_templates.py new file mode 100644 index 00000000..5d9182f1 --- /dev/null +++ b/google/cloud/dataproc_v1/types/workflow_templates.py @@ -0,0 +1,833 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.dataproc_v1.types import clusters +from google.cloud.dataproc_v1.types import jobs as gcd_jobs +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.dataproc.v1", + manifest={ + "WorkflowTemplate", + "WorkflowTemplatePlacement", + "ManagedCluster", + "ClusterSelector", + "OrderedJob", + "TemplateParameter", + "ParameterValidation", + "RegexValidation", + "ValueValidation", + "WorkflowMetadata", + "ClusterOperation", + "WorkflowGraph", + "WorkflowNode", + "CreateWorkflowTemplateRequest", + "GetWorkflowTemplateRequest", + "InstantiateWorkflowTemplateRequest", + "InstantiateInlineWorkflowTemplateRequest", + "UpdateWorkflowTemplateRequest", + "ListWorkflowTemplatesRequest", + "ListWorkflowTemplatesResponse", + "DeleteWorkflowTemplateRequest", + }, +) + + +class WorkflowTemplate(proto.Message): + r"""A Dataproc workflow template resource. + + Attributes: + id (str): + + name (str): + Output only. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates``, the resource + name of the template has the following format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates``, the + resource name of the template has the following format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + version (int): + Optional. Used to perform a consistent read-modify-write. + + This field should be left blank for a + ``CreateWorkflowTemplate`` request. It is required for an + ``UpdateWorkflowTemplate`` request, and must match the + current server version. A typical update template flow would + fetch the current template with a ``GetWorkflowTemplate`` + request, which will return the current template with the + ``version`` field filled in with the current server version. + The user updates other fields in the template, then returns + it as part of the ``UpdateWorkflowTemplate`` request. + create_time (~.timestamp.Timestamp): + Output only. The time template was created. + update_time (~.timestamp.Timestamp): + Output only. The time template was last + updated. + labels (Sequence[~.workflow_templates.WorkflowTemplate.LabelsEntry]): + Optional. The labels to associate with this template. These + labels will be propagated to all jobs and clusters created + by the workflow instance. + + Label **keys** must contain 1 to 63 characters, and must + conform to `RFC + 1035 `__. + + Label **values** may be empty, but, if present, must contain + 1 to 63 characters, and must conform to `RFC + 1035 `__. + + No more than 32 labels can be associated with a template. + placement (~.workflow_templates.WorkflowTemplatePlacement): + Required. WorkflowTemplate scheduling + information. + jobs (Sequence[~.workflow_templates.OrderedJob]): + Required. The Directed Acyclic Graph of Jobs + to submit. + parameters (Sequence[~.workflow_templates.TemplateParameter]): + Optional. 
Template parameters whose values + are substituted into the template. Values for + parameters must be provided when the template is + instantiated. + """ + + id = proto.Field(proto.STRING, number=2) + + name = proto.Field(proto.STRING, number=1) + + version = proto.Field(proto.INT32, number=3) + + create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,) + + update_time = proto.Field(proto.MESSAGE, number=5, message=timestamp.Timestamp,) + + labels = proto.MapField(proto.STRING, proto.STRING, number=6) + + placement = proto.Field( + proto.MESSAGE, number=7, message="WorkflowTemplatePlacement", + ) + + jobs = proto.RepeatedField(proto.MESSAGE, number=8, message="OrderedJob",) + + parameters = proto.RepeatedField( + proto.MESSAGE, number=9, message="TemplateParameter", + ) + + +class WorkflowTemplatePlacement(proto.Message): + r"""Specifies workflow execution target. + + Either ``managed_cluster`` or ``cluster_selector`` is required. + + Attributes: + managed_cluster (~.workflow_templates.ManagedCluster): + A cluster that is managed by the workflow. + cluster_selector (~.workflow_templates.ClusterSelector): + Optional. A selector that chooses target + cluster for jobs based on metadata. + + The selector is evaluated at the time each job + is submitted. + """ + + managed_cluster = proto.Field( + proto.MESSAGE, number=1, oneof="placement", message="ManagedCluster", + ) + + cluster_selector = proto.Field( + proto.MESSAGE, number=2, oneof="placement", message="ClusterSelector", + ) + + +class ManagedCluster(proto.Message): + r"""Cluster that is managed by the workflow. + + Attributes: + cluster_name (str): + Required. The cluster name prefix. A unique + cluster name will be formed by appending a + random suffix. + The name must contain only lower-case letters + (a-z), numbers (0-9), and hyphens (-). Must + begin with a letter. Cannot begin or end with + hyphen. Must consist of between 2 and 35 + characters. + config (~.clusters.ClusterConfig): + Required. The cluster configuration. + labels (Sequence[~.workflow_templates.ManagedCluster.LabelsEntry]): + Optional. The labels to associate with this cluster. + + Label keys must be between 1 and 63 characters long, and + must conform to the following PCRE regular expression: + [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} + + Label values must be between 1 and 63 characters long, and + must conform to the following PCRE regular expression: + [\p{Ll}\p{Lo}\p{N}_-]{0,63} + + No more than 32 labels can be associated with a given + cluster. + """ + + cluster_name = proto.Field(proto.STRING, number=2) + + config = proto.Field(proto.MESSAGE, number=3, message=clusters.ClusterConfig,) + + labels = proto.MapField(proto.STRING, proto.STRING, number=4) + + +class ClusterSelector(proto.Message): + r"""A selector that chooses target cluster for jobs based on + metadata. + + Attributes: + zone (str): + Optional. The zone where workflow process + executes. This parameter does not affect the + selection of the cluster. + If unspecified, the zone of the first cluster + matching the selector is used. + cluster_labels (Sequence[~.workflow_templates.ClusterSelector.ClusterLabelsEntry]): + Required. The cluster labels. Cluster must + have all labels to match. + """ + + zone = proto.Field(proto.STRING, number=1) + + cluster_labels = proto.MapField(proto.STRING, proto.STRING, number=2) + + +class OrderedJob(proto.Message): + r"""A job executed by the workflow. + + Attributes: + step_id (str): + Required. The step id. 
The id must be unique among all jobs + within the template. + + The step id is used as prefix for job id, as job + ``goog-dataproc-workflow-step-id`` label, and in + [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] + field from other steps. + + The id must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). Cannot begin or end with + underscore or hyphen. Must consist of between 3 and 50 + characters. + hadoop_job (~.gcd_jobs.HadoopJob): + Optional. Job is a Hadoop job. + spark_job (~.gcd_jobs.SparkJob): + Optional. Job is a Spark job. + pyspark_job (~.gcd_jobs.PySparkJob): + Optional. Job is a PySpark job. + hive_job (~.gcd_jobs.HiveJob): + Optional. Job is a Hive job. + pig_job (~.gcd_jobs.PigJob): + Optional. Job is a Pig job. + spark_r_job (~.gcd_jobs.SparkRJob): + Optional. Job is a SparkR job. + spark_sql_job (~.gcd_jobs.SparkSqlJob): + Optional. Job is a SparkSql job. + presto_job (~.gcd_jobs.PrestoJob): + Optional. Job is a Presto job. + labels (Sequence[~.workflow_templates.OrderedJob.LabelsEntry]): + Optional. The labels to associate with this job. + + Label keys must be between 1 and 63 characters long, and + must conform to the following regular expression: + [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} + + Label values must be between 1 and 63 characters long, and + must conform to the following regular expression: + [\p{Ll}\p{Lo}\p{N}_-]{0,63} + + No more than 32 labels can be associated with a given job. + scheduling (~.gcd_jobs.JobScheduling): + Optional. Job scheduling configuration. + prerequisite_step_ids (Sequence[str]): + Optional. The optional list of prerequisite job step_ids. If + not specified, the job will start at the beginning of + workflow. + """ + + step_id = proto.Field(proto.STRING, number=1) + + hadoop_job = proto.Field( + proto.MESSAGE, number=2, oneof="job_type", message=gcd_jobs.HadoopJob, + ) + + spark_job = proto.Field( + proto.MESSAGE, number=3, oneof="job_type", message=gcd_jobs.SparkJob, + ) + + pyspark_job = proto.Field( + proto.MESSAGE, number=4, oneof="job_type", message=gcd_jobs.PySparkJob, + ) + + hive_job = proto.Field( + proto.MESSAGE, number=5, oneof="job_type", message=gcd_jobs.HiveJob, + ) + + pig_job = proto.Field( + proto.MESSAGE, number=6, oneof="job_type", message=gcd_jobs.PigJob, + ) + + spark_r_job = proto.Field( + proto.MESSAGE, number=11, oneof="job_type", message=gcd_jobs.SparkRJob, + ) + + spark_sql_job = proto.Field( + proto.MESSAGE, number=7, oneof="job_type", message=gcd_jobs.SparkSqlJob, + ) + + presto_job = proto.Field( + proto.MESSAGE, number=12, oneof="job_type", message=gcd_jobs.PrestoJob, + ) + + labels = proto.MapField(proto.STRING, proto.STRING, number=8) + + scheduling = proto.Field(proto.MESSAGE, number=9, message=gcd_jobs.JobScheduling,) + + prerequisite_step_ids = proto.RepeatedField(proto.STRING, number=10) + + +class TemplateParameter(proto.Message): + r"""A configurable parameter that replaces one or more fields in + the template. Parameterizable fields: + - Labels + - File uris + - Job properties + - Job arguments + - Script variables + - Main class (in HadoopJob and SparkJob) + - Zone (in ClusterSelector) + + Attributes: + name (str): + Required. Parameter name. The parameter name is used as the + key, and paired with the parameter value, which are passed + to the template when the template is instantiated. The name + must contain only capital letters (A-Z), numbers (0-9), and + underscores (_), and must not start with a number. 
The + maximum length is 40 characters. + fields (Sequence[str]): + Required. Paths to all fields that the parameter replaces. A + field is allowed to appear in at most one parameter's list + of field paths. + + A field path is similar in syntax to a + [google.protobuf.FieldMask][google.protobuf.FieldMask]. For + example, a field path that references the zone field of a + workflow template's cluster selector would be specified as + ``placement.clusterSelector.zone``. + + Also, field paths can reference fields using the following + syntax: + + - Values in maps can be referenced by key: + + - labels['key'] + - placement.clusterSelector.clusterLabels['key'] + - placement.managedCluster.labels['key'] + - placement.clusterSelector.clusterLabels['key'] + - jobs['step-id'].labels['key'] + + - Jobs in the jobs list can be referenced by step-id: + + - jobs['step-id'].hadoopJob.mainJarFileUri + - jobs['step-id'].hiveJob.queryFileUri + - jobs['step-id'].pySparkJob.mainPythonFileUri + - jobs['step-id'].hadoopJob.jarFileUris[0] + - jobs['step-id'].hadoopJob.archiveUris[0] + - jobs['step-id'].hadoopJob.fileUris[0] + - jobs['step-id'].pySparkJob.pythonFileUris[0] + + - Items in repeated fields can be referenced by a + zero-based index: + + - jobs['step-id'].sparkJob.args[0] + + - Other examples: + + - jobs['step-id'].hadoopJob.properties['key'] + - jobs['step-id'].hadoopJob.args[0] + - jobs['step-id'].hiveJob.scriptVariables['key'] + - jobs['step-id'].hadoopJob.mainJarFileUri + - placement.clusterSelector.zone + + It may not be possible to parameterize maps and repeated + fields in their entirety since only individual map values + and individual items in repeated fields can be referenced. + For example, the following field paths are invalid: + + - placement.clusterSelector.clusterLabels + - jobs['step-id'].sparkJob.args + description (str): + Optional. Brief description of the parameter. + Must not exceed 1024 characters. + validation (~.workflow_templates.ParameterValidation): + Optional. Validation rules to be applied to + this parameter's value. + """ + + name = proto.Field(proto.STRING, number=1) + + fields = proto.RepeatedField(proto.STRING, number=2) + + description = proto.Field(proto.STRING, number=3) + + validation = proto.Field(proto.MESSAGE, number=4, message="ParameterValidation",) + + +class ParameterValidation(proto.Message): + r"""Configuration for parameter validation. + + Attributes: + regex (~.workflow_templates.RegexValidation): + Validation based on regular expressions. + values (~.workflow_templates.ValueValidation): + Validation based on a list of allowed values. + """ + + regex = proto.Field( + proto.MESSAGE, number=1, oneof="validation_type", message="RegexValidation", + ) + + values = proto.Field( + proto.MESSAGE, number=2, oneof="validation_type", message="ValueValidation", + ) + + +class RegexValidation(proto.Message): + r"""Validation based on regular expressions. + + Attributes: + regexes (Sequence[str]): + Required. RE2 regular expressions used to + validate the parameter's value. The value must + match the regex in its entirety (substring + matches are not sufficient). + """ + + regexes = proto.RepeatedField(proto.STRING, number=1) + + +class ValueValidation(proto.Message): + r"""Validation based on a list of allowed values. + + Attributes: + values (Sequence[str]): + Required. List of allowed values for the + parameter. + """ + + values = proto.RepeatedField(proto.STRING, number=1) + + +class WorkflowMetadata(proto.Message): + r"""A Dataproc workflow template resource. 
+ + Attributes: + template (str): + Output only. The resource name of the workflow template as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates``, the resource + name of the template has the following format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates``, the + resource name of the template has the following format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + version (int): + Output only. The version of template at the + time of workflow instantiation. + create_cluster (~.workflow_templates.ClusterOperation): + Output only. The create cluster operation + metadata. + graph (~.workflow_templates.WorkflowGraph): + Output only. The workflow graph. + delete_cluster (~.workflow_templates.ClusterOperation): + Output only. The delete cluster operation + metadata. + state (~.workflow_templates.WorkflowMetadata.State): + Output only. The workflow state. + cluster_name (str): + Output only. The name of the target cluster. + parameters (Sequence[~.workflow_templates.WorkflowMetadata.ParametersEntry]): + Map from parameter names to values that were + used for those parameters. + start_time (~.timestamp.Timestamp): + Output only. Workflow start time. + end_time (~.timestamp.Timestamp): + Output only. Workflow end time. + cluster_uuid (str): + Output only. The UUID of target cluster. + """ + + class State(proto.Enum): + r"""The operation state.""" + UNKNOWN = 0 + PENDING = 1 + RUNNING = 2 + DONE = 3 + + template = proto.Field(proto.STRING, number=1) + + version = proto.Field(proto.INT32, number=2) + + create_cluster = proto.Field(proto.MESSAGE, number=3, message="ClusterOperation",) + + graph = proto.Field(proto.MESSAGE, number=4, message="WorkflowGraph",) + + delete_cluster = proto.Field(proto.MESSAGE, number=5, message="ClusterOperation",) + + state = proto.Field(proto.ENUM, number=6, enum=State,) + + cluster_name = proto.Field(proto.STRING, number=7) + + parameters = proto.MapField(proto.STRING, proto.STRING, number=8) + + start_time = proto.Field(proto.MESSAGE, number=9, message=timestamp.Timestamp,) + + end_time = proto.Field(proto.MESSAGE, number=10, message=timestamp.Timestamp,) + + cluster_uuid = proto.Field(proto.STRING, number=11) + + +class ClusterOperation(proto.Message): + r"""The cluster operation triggered by a workflow. + + Attributes: + operation_id (str): + Output only. The id of the cluster operation. + error (str): + Output only. Error, if operation failed. + done (bool): + Output only. Indicates the operation is done. + """ + + operation_id = proto.Field(proto.STRING, number=1) + + error = proto.Field(proto.STRING, number=2) + + done = proto.Field(proto.BOOL, number=3) + + +class WorkflowGraph(proto.Message): + r"""The workflow graph. + + Attributes: + nodes (Sequence[~.workflow_templates.WorkflowNode]): + Output only. The workflow nodes. + """ + + nodes = proto.RepeatedField(proto.MESSAGE, number=1, message="WorkflowNode",) + + +class WorkflowNode(proto.Message): + r"""The workflow node. + + Attributes: + step_id (str): + Output only. The name of the node. + prerequisite_step_ids (Sequence[str]): + Output only. Node's prerequisite nodes. + job_id (str): + Output only. The job id; populated after the + node enters RUNNING state. + state (~.workflow_templates.WorkflowNode.NodeState): + Output only. The node state. + error (str): + Output only. The error detail. 
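To ground the TemplateParameter field-path and validation documentation above, here is a minimal, hedged sketch of a parameterized template (the package-level type exports and every id, label, zone, and bucket value are placeholders or assumptions, not taken from this change):

    >>> from google.cloud import dataproc_v1
    >>>
    >>> template = dataproc_v1.WorkflowTemplate(
    ...     id="my-workflow",
    ...     placement=dataproc_v1.WorkflowTemplatePlacement(
    ...         cluster_selector=dataproc_v1.ClusterSelector(
    ...             cluster_labels={"env": "staging"},
    ...         ),
    ...     ),
    ...     jobs=[
    ...         dataproc_v1.OrderedJob(
    ...             step_id="word-count",
    ...             pyspark_job=dataproc_v1.PySparkJob(
    ...                 main_python_file_uri="gs://my-bucket/word_count.py",
    ...             ),
    ...         ),
    ...     ],
    ...     parameters=[
    ...         # Substitutes the cluster selector's zone at instantiation time,
    ...         # using the placement.clusterSelector.zone path documented above.
    ...         dataproc_v1.TemplateParameter(
    ...             name="ZONE",
    ...             fields=["placement.clusterSelector.zone"],
    ...             validation=dataproc_v1.ParameterValidation(
    ...                 regex=dataproc_v1.RegexValidation(regexes=["us-central1-.*"]),
    ...             ),
    ...         ),
    ...     ],
    ... )

The matching value would then be supplied in the parameters map of InstantiateWorkflowTemplateRequest, for example {"ZONE": "us-central1-a"}.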
+ """ + + class NodeState(proto.Enum): + r"""The workflow node state.""" + NODE_STATE_UNSPECIFIED = 0 + BLOCKED = 1 + RUNNABLE = 2 + RUNNING = 3 + COMPLETED = 4 + FAILED = 5 + + step_id = proto.Field(proto.STRING, number=1) + + prerequisite_step_ids = proto.RepeatedField(proto.STRING, number=2) + + job_id = proto.Field(proto.STRING, number=3) + + state = proto.Field(proto.ENUM, number=5, enum=NodeState,) + + error = proto.Field(proto.STRING, number=6) + + +class CreateWorkflowTemplateRequest(proto.Message): + r"""A request to create a workflow template. + + Attributes: + parent (str): + Required. The resource name of the region or location, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates,create``, the + resource name of the region has the following format: + ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.workflowTemplates.create``, the + resource name of the location has the following format: + ``projects/{project_id}/locations/{location}`` + template (~.workflow_templates.WorkflowTemplate): + Required. The Dataproc workflow template to + create. + """ + + parent = proto.Field(proto.STRING, number=1) + + template = proto.Field(proto.MESSAGE, number=2, message=WorkflowTemplate,) + + +class GetWorkflowTemplateRequest(proto.Message): + r"""A request to fetch a workflow template. + + Attributes: + name (str): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates.get``, the + resource name of the template has the following format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates.get``, the + resource name of the template has the following format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + version (int): + Optional. The version of workflow template to + retrieve. Only previously instantiated versions + can be retrieved. + If unspecified, retrieves the current version. + """ + + name = proto.Field(proto.STRING, number=1) + + version = proto.Field(proto.INT32, number=2) + + +class InstantiateWorkflowTemplateRequest(proto.Message): + r"""A request to instantiate a workflow template. + + Attributes: + name (str): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + version (int): + Optional. The version of workflow template to + instantiate. If specified, the workflow will be + instantiated only if the current version of the + workflow template has the supplied version. + This option cannot be used to instantiate a + previous version of workflow template. + request_id (str): + Optional. A tag that prevents multiple concurrent workflow + instances with the same tag from running. This mitigates + risk of concurrent instances started due to retries. + + It is recommended to always set this value to a + `UUID `__. 
+ + The tag must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). The maximum length is 40 + characters. + parameters (Sequence[~.workflow_templates.InstantiateWorkflowTemplateRequest.ParametersEntry]): + Optional. Map from parameter names to values + that should be used for those parameters. Values + may not exceed 100 characters. + """ + + name = proto.Field(proto.STRING, number=1) + + version = proto.Field(proto.INT32, number=2) + + request_id = proto.Field(proto.STRING, number=5) + + parameters = proto.MapField(proto.STRING, proto.STRING, number=6) + + +class InstantiateInlineWorkflowTemplateRequest(proto.Message): + r"""A request to instantiate an inline workflow template. + + Attributes: + parent (str): + Required. The resource name of the region or location, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For + ``projects.regions.workflowTemplates,instantiateinline``, + the resource name of the region has the following format: + ``projects/{project_id}/regions/{region}`` + + - For + ``projects.locations.workflowTemplates.instantiateinline``, + the resource name of the location has the following + format: ``projects/{project_id}/locations/{location}`` + template (~.workflow_templates.WorkflowTemplate): + Required. The workflow template to + instantiate. + request_id (str): + Optional. A tag that prevents multiple concurrent workflow + instances with the same tag from running. This mitigates + risk of concurrent instances started due to retries. + + It is recommended to always set this value to a + `UUID `__. + + The tag must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). The maximum length is 40 + characters. + """ + + parent = proto.Field(proto.STRING, number=1) + + template = proto.Field(proto.MESSAGE, number=2, message=WorkflowTemplate,) + + request_id = proto.Field(proto.STRING, number=3) + + +class UpdateWorkflowTemplateRequest(proto.Message): + r"""A request to update a workflow template. + + Attributes: + template (~.workflow_templates.WorkflowTemplate): + Required. The updated workflow template. + + The ``template.version`` field must match the current + version. + """ + + template = proto.Field(proto.MESSAGE, number=1, message=WorkflowTemplate,) + + +class ListWorkflowTemplatesRequest(proto.Message): + r"""A request to list workflow templates in a project. + + Attributes: + parent (str): + Required. The resource name of the region or location, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates,list``, the + resource name of the region has the following format: + ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.workflowTemplates.list``, the + resource name of the location has the following format: + ``projects/{project_id}/locations/{location}`` + page_size (int): + Optional. The maximum number of results to + return in each response. + page_token (str): + Optional. The page token, returned by a + previous call, to request the next page of + results. + """ + + parent = proto.Field(proto.STRING, number=1) + + page_size = proto.Field(proto.INT32, number=2) + + page_token = proto.Field(proto.STRING, number=3) + + +class ListWorkflowTemplatesResponse(proto.Message): + r"""A response to a request to list workflow templates in a + project. + + Attributes: + templates (Sequence[~.workflow_templates.WorkflowTemplate]): + Output only. WorkflowTemplates list. 
+ next_page_token (str): + Output only. This token is included in the response if there + are more results to fetch. To fetch additional results, + provide this value as the page_token in a subsequent + ListWorkflowTemplatesRequest. + """ + + @property + def raw_page(self): + return self + + templates = proto.RepeatedField(proto.MESSAGE, number=1, message=WorkflowTemplate,) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class DeleteWorkflowTemplateRequest(proto.Message): + r"""A request to delete a workflow template. + Currently started workflows will remain running. + + Attributes: + name (str): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates.delete``, the + resource name of the template has the following format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + version (int): + Optional. The version of workflow template to + delete. If specified, will only delete the + template if the current server version matches + specified version. + """ + + name = proto.Field(proto.STRING, number=1) + + version = proto.Field(proto.INT32, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/dataproc_v1beta2/__init__.py b/google/cloud/dataproc_v1beta2/__init__.py index 689da3af..1a0d3c1a 100644 --- a/google/cloud/dataproc_v1beta2/__init__.py +++ b/google/cloud/dataproc_v1beta2/__init__.py @@ -1,70 +1,199 @@ # -*- coding: utf-8 -*- -# + # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# https://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# - -from __future__ import absolute_import -import sys -import warnings - -from google.cloud.dataproc_v1beta2 import types -from google.cloud.dataproc_v1beta2.gapic import autoscaling_policy_service_client -from google.cloud.dataproc_v1beta2.gapic import cluster_controller_client -from google.cloud.dataproc_v1beta2.gapic import enums -from google.cloud.dataproc_v1beta2.gapic import job_controller_client -from google.cloud.dataproc_v1beta2.gapic import workflow_template_service_client - - -if sys.version_info[:2] == (2, 7): - message = ( - "A future version of this library will drop support for Python 2.7. 
" - "More details about Python 2 support for Google Cloud Client Libraries " - "can be found at https://cloud.google.com/python/docs/python2-sunset/" - ) - warnings.warn(message, DeprecationWarning) - - -class AutoscalingPolicyServiceClient( - autoscaling_policy_service_client.AutoscalingPolicyServiceClient -): - __doc__ = autoscaling_policy_service_client.AutoscalingPolicyServiceClient.__doc__ - enums = enums - - -class ClusterControllerClient(cluster_controller_client.ClusterControllerClient): - __doc__ = cluster_controller_client.ClusterControllerClient.__doc__ - enums = enums - - -class JobControllerClient(job_controller_client.JobControllerClient): - __doc__ = job_controller_client.JobControllerClient.__doc__ - enums = enums - - -class WorkflowTemplateServiceClient( - workflow_template_service_client.WorkflowTemplateServiceClient -): - __doc__ = workflow_template_service_client.WorkflowTemplateServiceClient.__doc__ - enums = enums +from .services.autoscaling_policy_service import AutoscalingPolicyServiceClient +from .services.cluster_controller import ClusterControllerClient +from .services.job_controller import JobControllerClient +from .services.workflow_template_service import WorkflowTemplateServiceClient +from .types.autoscaling_policies import AutoscalingPolicy +from .types.autoscaling_policies import BasicAutoscalingAlgorithm +from .types.autoscaling_policies import BasicYarnAutoscalingConfig +from .types.autoscaling_policies import CreateAutoscalingPolicyRequest +from .types.autoscaling_policies import DeleteAutoscalingPolicyRequest +from .types.autoscaling_policies import GetAutoscalingPolicyRequest +from .types.autoscaling_policies import InstanceGroupAutoscalingPolicyConfig +from .types.autoscaling_policies import ListAutoscalingPoliciesRequest +from .types.autoscaling_policies import ListAutoscalingPoliciesResponse +from .types.autoscaling_policies import UpdateAutoscalingPolicyRequest +from .types.clusters import AcceleratorConfig +from .types.clusters import AutoscalingConfig +from .types.clusters import Cluster +from .types.clusters import ClusterConfig +from .types.clusters import ClusterMetrics +from .types.clusters import ClusterStatus +from .types.clusters import CreateClusterRequest +from .types.clusters import DeleteClusterRequest +from .types.clusters import DiagnoseClusterRequest +from .types.clusters import DiagnoseClusterResults +from .types.clusters import DiskConfig +from .types.clusters import EncryptionConfig +from .types.clusters import EndpointConfig +from .types.clusters import GceClusterConfig +from .types.clusters import GetClusterRequest +from .types.clusters import GkeClusterConfig +from .types.clusters import InstanceGroupConfig +from .types.clusters import KerberosConfig +from .types.clusters import LifecycleConfig +from .types.clusters import ListClustersRequest +from .types.clusters import ListClustersResponse +from .types.clusters import ManagedGroupConfig +from .types.clusters import NodeInitializationAction +from .types.clusters import ReservationAffinity +from .types.clusters import SecurityConfig +from .types.clusters import SoftwareConfig +from .types.clusters import UpdateClusterRequest +from .types.jobs import CancelJobRequest +from .types.jobs import DeleteJobRequest +from .types.jobs import GetJobRequest +from .types.jobs import HadoopJob +from .types.jobs import HiveJob +from .types.jobs import Job +from .types.jobs import JobMetadata +from .types.jobs import JobPlacement +from .types.jobs import JobReference +from .types.jobs import 
JobScheduling +from .types.jobs import JobStatus +from .types.jobs import ListJobsRequest +from .types.jobs import ListJobsResponse +from .types.jobs import LoggingConfig +from .types.jobs import PigJob +from .types.jobs import PrestoJob +from .types.jobs import PySparkJob +from .types.jobs import QueryList +from .types.jobs import SparkJob +from .types.jobs import SparkRJob +from .types.jobs import SparkSqlJob +from .types.jobs import SubmitJobRequest +from .types.jobs import UpdateJobRequest +from .types.jobs import YarnApplication +from .types.operations import ClusterOperationMetadata +from .types.operations import ClusterOperationStatus +from .types.shared import Component +from .types.workflow_templates import ClusterOperation +from .types.workflow_templates import ClusterSelector +from .types.workflow_templates import CreateWorkflowTemplateRequest +from .types.workflow_templates import DeleteWorkflowTemplateRequest +from .types.workflow_templates import GetWorkflowTemplateRequest +from .types.workflow_templates import InstantiateInlineWorkflowTemplateRequest +from .types.workflow_templates import InstantiateWorkflowTemplateRequest +from .types.workflow_templates import ListWorkflowTemplatesRequest +from .types.workflow_templates import ListWorkflowTemplatesResponse +from .types.workflow_templates import ManagedCluster +from .types.workflow_templates import OrderedJob +from .types.workflow_templates import ParameterValidation +from .types.workflow_templates import RegexValidation +from .types.workflow_templates import TemplateParameter +from .types.workflow_templates import UpdateWorkflowTemplateRequest +from .types.workflow_templates import ValueValidation +from .types.workflow_templates import WorkflowGraph +from .types.workflow_templates import WorkflowMetadata +from .types.workflow_templates import WorkflowNode +from .types.workflow_templates import WorkflowTemplate +from .types.workflow_templates import WorkflowTemplatePlacement __all__ = ( - "enums", - "types", + "AcceleratorConfig", + "AutoscalingConfig", + "AutoscalingPolicy", "AutoscalingPolicyServiceClient", + "BasicAutoscalingAlgorithm", + "BasicYarnAutoscalingConfig", + "CancelJobRequest", + "Cluster", + "ClusterConfig", "ClusterControllerClient", + "ClusterMetrics", + "ClusterOperation", + "ClusterOperationMetadata", + "ClusterOperationStatus", + "ClusterSelector", + "ClusterStatus", + "Component", + "CreateAutoscalingPolicyRequest", + "CreateClusterRequest", + "CreateWorkflowTemplateRequest", + "DeleteAutoscalingPolicyRequest", + "DeleteClusterRequest", + "DeleteJobRequest", + "DeleteWorkflowTemplateRequest", + "DiagnoseClusterRequest", + "DiagnoseClusterResults", + "DiskConfig", + "EncryptionConfig", + "EndpointConfig", + "GceClusterConfig", + "GetAutoscalingPolicyRequest", + "GetClusterRequest", + "GetJobRequest", + "GetWorkflowTemplateRequest", + "GkeClusterConfig", + "HadoopJob", + "HiveJob", + "InstanceGroupAutoscalingPolicyConfig", + "InstanceGroupConfig", + "InstantiateInlineWorkflowTemplateRequest", + "InstantiateWorkflowTemplateRequest", + "Job", "JobControllerClient", + "JobMetadata", + "JobPlacement", + "JobReference", + "JobScheduling", + "JobStatus", + "KerberosConfig", + "LifecycleConfig", + "ListAutoscalingPoliciesRequest", + "ListAutoscalingPoliciesResponse", + "ListClustersRequest", + "ListClustersResponse", + "ListJobsRequest", + "ListJobsResponse", + "ListWorkflowTemplatesRequest", + "ListWorkflowTemplatesResponse", + "LoggingConfig", + "ManagedCluster", + "ManagedGroupConfig", + 
"NodeInitializationAction", + "OrderedJob", + "ParameterValidation", + "PigJob", + "PrestoJob", + "PySparkJob", + "QueryList", + "RegexValidation", + "ReservationAffinity", + "SecurityConfig", + "SoftwareConfig", + "SparkJob", + "SparkRJob", + "SparkSqlJob", + "SubmitJobRequest", + "TemplateParameter", + "UpdateAutoscalingPolicyRequest", + "UpdateClusterRequest", + "UpdateJobRequest", + "UpdateWorkflowTemplateRequest", + "ValueValidation", + "WorkflowGraph", + "WorkflowMetadata", + "WorkflowNode", + "WorkflowTemplate", + "WorkflowTemplatePlacement", + "YarnApplication", "WorkflowTemplateServiceClient", ) diff --git a/google/cloud/dataproc_v1beta2/gapic/__init__.py b/google/cloud/dataproc_v1beta2/gapic/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/google/cloud/dataproc_v1beta2/gapic/autoscaling_policy_service_client.py b/google/cloud/dataproc_v1beta2/gapic/autoscaling_policy_service_client.py deleted file mode 100644 index 7783cf49..00000000 --- a/google/cloud/dataproc_v1beta2/gapic/autoscaling_policy_service_client.py +++ /dev/null @@ -1,653 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Accesses the google.cloud.dataproc.v1beta2 AutoscalingPolicyService API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.page_iterator -import google.api_core.path_template -import grpc - -from google.cloud.dataproc_v1beta2.gapic import autoscaling_policy_service_client_config -from google.cloud.dataproc_v1beta2.gapic import enums -from google.cloud.dataproc_v1beta2.gapic.transports import ( - autoscaling_policy_service_grpc_transport, -) -from google.cloud.dataproc_v1beta2.proto import autoscaling_policies_pb2 -from google.cloud.dataproc_v1beta2.proto import autoscaling_policies_pb2_grpc -from google.protobuf import empty_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-dataproc", -).version - - -class AutoscalingPolicyServiceClient(object): - """ - The API interface for managing autoscaling policies in the - Cloud Dataproc API. - """ - - SERVICE_ADDRESS = "dataproc.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.cloud.dataproc.v1beta2.AutoscalingPolicyService" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. 
- kwargs: Additional arguments to pass to the constructor. - - Returns: - AutoscalingPolicyServiceClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def autoscaling_policy_path(cls, project, location, autoscaling_policy): - """Return a fully-qualified autoscaling_policy string.""" - return google.api_core.path_template.expand( - "projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}", - project=project, - location=location, - autoscaling_policy=autoscaling_policy, - ) - - @classmethod - def location_path(cls, project, location): - """Return a fully-qualified location string.""" - return google.api_core.path_template.expand( - "projects/{project}/locations/{location}", - project=project, - location=location, - ) - - @classmethod - def region_path(cls, project, region): - """Return a fully-qualified region string.""" - return google.api_core.path_template.expand( - "projects/{project}/regions/{region}", project=project, region=region, - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.AutoscalingPolicyServiceGrpcTransport, - Callable[[~.Credentials, type], ~.AutoscalingPolicyServiceGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. 
- if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = autoscaling_policy_service_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=autoscaling_policy_service_grpc_transport.AutoscalingPolicyServiceGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = autoscaling_policy_service_grpc_transport.AutoscalingPolicyServiceGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials, - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME], - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def create_autoscaling_policy( - self, - parent, - policy, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates new autoscaling policy. - - Example: - >>> from google.cloud import dataproc_v1beta2 - >>> - >>> client = dataproc_v1beta2.AutoscalingPolicyServiceClient() - >>> - >>> parent = client.region_path('[PROJECT]', '[REGION]') - >>> - >>> # TODO: Initialize `policy`: - >>> policy = {} - >>> - >>> response = client.create_autoscaling_policy(parent, policy) - - Args: - parent (str): Required. The "resource name" of the region or location, as - described in https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.autoscalingPolicies.create``, the resource - name has the following format: - ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.autoscalingPolicies.create``, the resource - name has the following format: - ``projects/{project_id}/locations/{location}`` - policy (Union[dict, ~google.cloud.dataproc_v1beta2.types.AutoscalingPolicy]): Required. The autoscaling policy to create. 
- - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1beta2.types.AutoscalingPolicy` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1beta2.types.AutoscalingPolicy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "create_autoscaling_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "create_autoscaling_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_autoscaling_policy, - default_retry=self._method_configs["CreateAutoscalingPolicy"].retry, - default_timeout=self._method_configs["CreateAutoscalingPolicy"].timeout, - client_info=self._client_info, - ) - - request = autoscaling_policies_pb2.CreateAutoscalingPolicyRequest( - parent=parent, policy=policy, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_autoscaling_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def update_autoscaling_policy( - self, - policy, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates (replaces) autoscaling policy. - - Disabled check for update_mask, because all updates will be full - replacements. - - Example: - >>> from google.cloud import dataproc_v1beta2 - >>> - >>> client = dataproc_v1beta2.AutoscalingPolicyServiceClient() - >>> - >>> # TODO: Initialize `policy`: - >>> policy = {} - >>> - >>> response = client.update_autoscaling_policy(policy) - - Args: - policy (Union[dict, ~google.cloud.dataproc_v1beta2.types.AutoscalingPolicy]): Required. The updated autoscaling policy. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1beta2.types.AutoscalingPolicy` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1beta2.types.AutoscalingPolicy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. 
- google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "update_autoscaling_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "update_autoscaling_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_autoscaling_policy, - default_retry=self._method_configs["UpdateAutoscalingPolicy"].retry, - default_timeout=self._method_configs["UpdateAutoscalingPolicy"].timeout, - client_info=self._client_info, - ) - - request = autoscaling_policies_pb2.UpdateAutoscalingPolicyRequest( - policy=policy, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("policy.name", policy.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_autoscaling_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_autoscaling_policy( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Retrieves autoscaling policy. - - Example: - >>> from google.cloud import dataproc_v1beta2 - >>> - >>> client = dataproc_v1beta2.AutoscalingPolicyServiceClient() - >>> - >>> # TODO: Initialize `name`: - >>> name = '' - >>> - >>> response = client.get_autoscaling_policy(name) - - Args: - name (str): Required. The "resource name" of the autoscaling policy, as - described in https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.autoscalingPolicies.get``, the resource name - of the policy has the following format: - ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` - - - For ``projects.locations.autoscalingPolicies.get``, the resource name - of the policy has the following format: - ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1beta2.types.AutoscalingPolicy` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "get_autoscaling_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "get_autoscaling_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_autoscaling_policy, - default_retry=self._method_configs["GetAutoscalingPolicy"].retry, - default_timeout=self._method_configs["GetAutoscalingPolicy"].timeout, - client_info=self._client_info, - ) - - request = autoscaling_policies_pb2.GetAutoscalingPolicyRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_autoscaling_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_autoscaling_policies( - self, - parent, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists autoscaling policies in the project. - - Example: - >>> from google.cloud import dataproc_v1beta2 - >>> - >>> client = dataproc_v1beta2.AutoscalingPolicyServiceClient() - >>> - >>> parent = client.region_path('[PROJECT]', '[REGION]') - >>> - >>> # Iterate over all results - >>> for element in client.list_autoscaling_policies(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_autoscaling_policies(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The "resource name" of the region or location, as - described in https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.autoscalingPolicies.list``, the resource name - of the region has the following format: - ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.autoscalingPolicies.list``, the resource - name of the location has the following format: - ``projects/{project_id}/locations/{location}`` - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.dataproc_v1beta2.types.AutoscalingPolicy` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "list_autoscaling_policies" not in self._inner_api_calls: - self._inner_api_calls[ - "list_autoscaling_policies" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_autoscaling_policies, - default_retry=self._method_configs["ListAutoscalingPolicies"].retry, - default_timeout=self._method_configs["ListAutoscalingPolicies"].timeout, - client_info=self._client_info, - ) - - request = autoscaling_policies_pb2.ListAutoscalingPoliciesRequest( - parent=parent, page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_autoscaling_policies"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="policies", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def delete_autoscaling_policy( - self, - name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes an autoscaling policy. It is an error to delete an autoscaling - policy that is in use by one or more clusters. - - Example: - >>> from google.cloud import dataproc_v1beta2 - >>> - >>> client = dataproc_v1beta2.AutoscalingPolicyServiceClient() - >>> - >>> # TODO: Initialize `name`: - >>> name = '' - >>> - >>> client.delete_autoscaling_policy(name) - - Args: - name (str): Required. The "resource name" of the autoscaling policy, as - described in https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.autoscalingPolicies.delete``, the resource - name of the policy has the following format: - ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` - - - For ``projects.locations.autoscalingPolicies.delete``, the resource - name of the policy has the following format: - ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "delete_autoscaling_policy" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_autoscaling_policy" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_autoscaling_policy, - default_retry=self._method_configs["DeleteAutoscalingPolicy"].retry, - default_timeout=self._method_configs["DeleteAutoscalingPolicy"].timeout, - client_info=self._client_info, - ) - - request = autoscaling_policies_pb2.DeleteAutoscalingPolicyRequest(name=name,) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_autoscaling_policy"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) diff --git a/google/cloud/dataproc_v1beta2/gapic/autoscaling_policy_service_client_config.py b/google/cloud/dataproc_v1beta2/gapic/autoscaling_policy_service_client_config.py deleted file mode 100644 index 3274e972..00000000 --- a/google/cloud/dataproc_v1beta2/gapic/autoscaling_policy_service_client_config.py +++ /dev/null @@ -1,139 +0,0 @@ -config = { - "interfaces": { - "google.cloud.dataproc.v1beta2.AutoscalingPolicyService": { - "retry_codes": { - "retry_policy_1_codes": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "retry_policy_4_codes": ["UNAVAILABLE"], - "retry_policy_6_codes": ["UNAVAILABLE"], - "no_retry_codes": [], - "retry_policy_3_codes": [ - "INTERNAL", - "DEADLINE_EXCEEDED", - "UNAVAILABLE", - ], - "retry_policy_2_codes": ["UNAVAILABLE"], - "no_retry_1_codes": [], - "retry_policy_5_codes": [ - "DEADLINE_EXCEEDED", - "INTERNAL", - "UNAVAILABLE", - ], - "retry_policy_7_codes": [ - "DEADLINE_EXCEEDED", - "INTERNAL", - "UNAVAILABLE", - ], - }, - "retry_params": { - "retry_policy_1_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 600000, - "total_timeout_millis": 600000, - }, - "retry_policy_6_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 600000, - "total_timeout_millis": 600000, - }, - "retry_policy_2_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 300000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 300000, - "total_timeout_millis": 300000, - }, - "retry_policy_3_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 300000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 300000, - "total_timeout_millis": 300000, - }, - "retry_policy_7_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 600000, - "total_timeout_millis": 600000, - }, - "retry_policy_5_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 900000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 900000, - "total_timeout_millis": 900000, - 
}, - "retry_policy_4_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 900000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 900000, - "total_timeout_millis": 900000, - }, - "no_retry_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 0, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 0, - "total_timeout_millis": 0, - }, - "no_retry_1_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 600000, - "total_timeout_millis": 600000, - }, - }, - "methods": { - "CreateAutoscalingPolicy": { - "timeout_millis": 600000, - "retry_codes_name": "no_retry_1_codes", - "retry_params_name": "no_retry_1_params", - }, - "UpdateAutoscalingPolicy": { - "timeout_millis": 600000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "GetAutoscalingPolicy": { - "timeout_millis": 600000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "ListAutoscalingPolicies": { - "timeout_millis": 600000, - "retry_codes_name": "retry_policy_1_codes", - "retry_params_name": "retry_policy_1_params", - }, - "DeleteAutoscalingPolicy": { - "timeout_millis": 600000, - "retry_codes_name": "no_retry_1_codes", - "retry_params_name": "no_retry_1_params", - }, - }, - } - } -} diff --git a/google/cloud/dataproc_v1beta2/gapic/cluster_controller_client.py b/google/cloud/dataproc_v1beta2/gapic/cluster_controller_client.py deleted file mode 100644 index bdc99bf7..00000000 --- a/google/cloud/dataproc_v1beta2/gapic/cluster_controller_client.py +++ /dev/null @@ -1,888 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Accesses the google.cloud.dataproc.v1beta2 ClusterController API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.grpc_helpers -import google.api_core.operation -import google.api_core.operations_v1 -import google.api_core.page_iterator -import grpc - -from google.cloud.dataproc_v1beta2.gapic import cluster_controller_client_config -from google.cloud.dataproc_v1beta2.gapic import enums -from google.cloud.dataproc_v1beta2.gapic.transports import ( - cluster_controller_grpc_transport, -) -from google.cloud.dataproc_v1beta2.proto import autoscaling_policies_pb2 -from google.cloud.dataproc_v1beta2.proto import autoscaling_policies_pb2_grpc -from google.cloud.dataproc_v1beta2.proto import clusters_pb2 -from google.cloud.dataproc_v1beta2.proto import clusters_pb2_grpc -from google.cloud.dataproc_v1beta2.proto import operations_pb2 as proto_operations_pb2 -from google.longrunning import operations_pb2 as longrunning_operations_pb2 -from google.protobuf import duration_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-dataproc", -).version - - -class ClusterControllerClient(object): - """ - The ClusterControllerService provides methods to manage clusters - of Compute Engine instances. - """ - - SERVICE_ADDRESS = "dataproc.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.cloud.dataproc.v1beta2.ClusterController" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ClusterControllerClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.ClusterControllerGrpcTransport, - Callable[[~.Credentials, type], ~.ClusterControllerGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. 
If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. - if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = cluster_controller_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=cluster_controller_grpc_transport.ClusterControllerGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = cluster_controller_grpc_transport.ClusterControllerGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials, - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME], - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def create_cluster( - self, - project_id, - region, - cluster, - request_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates a cluster in a project. The returned ``Operation.metadata`` - will be - `ClusterOperationMetadata `__. 
- - Example: - >>> from google.cloud import dataproc_v1beta2 - >>> - >>> client = dataproc_v1beta2.ClusterControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # TODO: Initialize `cluster`: - >>> cluster = {} - >>> - >>> response = client.create_cluster(project_id, region, cluster) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - project_id (str): Required. The ID of the Google Cloud Platform project that the cluster - belongs to. - region (str): Required. The Dataproc region in which to handle the request. - cluster (Union[dict, ~google.cloud.dataproc_v1beta2.types.Cluster]): Required. The cluster to create. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1beta2.types.Cluster` - request_id (str): Optional. A unique id used to identify the request. If the server - receives two ``CreateClusterRequest`` requests with the same id, then - the second request will be ignored and the first - ``google.longrunning.Operation`` created and stored in the backend is - returned. - - It is recommended to always set this value to a - `UUID `__. - - The id must contain only letters (a-z, A-Z), numbers (0-9), underscores - (_), and hyphens (-). The maximum length is 40 characters. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "create_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "create_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_cluster, - default_retry=self._method_configs["CreateCluster"].retry, - default_timeout=self._method_configs["CreateCluster"].timeout, - client_info=self._client_info, - ) - - request = clusters_pb2.CreateClusterRequest( - project_id=project_id, - region=region, - cluster=cluster, - request_id=request_id, - ) - operation = self._inner_api_calls["create_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - clusters_pb2.Cluster, - metadata_type=proto_operations_pb2.ClusterOperationMetadata, - ) - - def update_cluster( - self, - project_id, - region, - cluster_name, - cluster, - update_mask, - graceful_decommission_timeout=None, - request_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates a cluster in a project. 
The returned ``Operation.metadata`` - will be - `ClusterOperationMetadata `__. - - Example: - >>> from google.cloud import dataproc_v1beta2 - >>> - >>> client = dataproc_v1beta2.ClusterControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # TODO: Initialize `cluster_name`: - >>> cluster_name = '' - >>> - >>> # TODO: Initialize `cluster`: - >>> cluster = {} - >>> - >>> # TODO: Initialize `update_mask`: - >>> update_mask = {} - >>> - >>> response = client.update_cluster(project_id, region, cluster_name, cluster, update_mask) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - project_id (str): Required. The ID of the Google Cloud Platform project the - cluster belongs to. - region (str): Required. The Dataproc region in which to handle the request. - cluster_name (str): Required. The cluster name. - cluster (Union[dict, ~google.cloud.dataproc_v1beta2.types.Cluster]): Required. The changes to the cluster. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1beta2.types.Cluster` - update_mask (Union[dict, ~google.cloud.dataproc_v1beta2.types.FieldMask]): Required. Specifies the path, relative to ``Cluster``, of the field - to update. For example, to change the number of workers in a cluster to - 5, the ``update_mask`` parameter would be specified as - ``config.worker_config.num_instances``, and the ``PATCH`` request body - would specify the new value, as follows: - - :: - - { - "config":{ - "workerConfig":{ - "numInstances":"5" - } - } - } - - Similarly, to change the number of preemptible workers in a cluster to - 5, the ``update_mask`` parameter would be - ``config.secondary_worker_config.num_instances``, and the ``PATCH`` - request body would be set as follows: - - :: - - { - "config":{ - "secondaryWorkerConfig":{ - "numInstances":"5" - } - } - } - - Note: currently only the following fields can be updated: - - .. raw:: html - - - - - - - - - - - - - - - - - - - - - - - - - - -
- Mask                                            Purpose
- labels                                          Updates labels
- config.worker_config.num_instances              Resize primary worker group
- config.secondary_worker_config.num_instances    Resize secondary worker group
- config.lifecycle_config.auto_delete_ttl         Reset MAX TTL duration
- config.lifecycle_config.auto_delete_time        Update MAX TTL deletion timestamp
- config.lifecycle_config.idle_delete_ttl         Update Idle TTL duration
- config.autoscaling_config.policy_uri            Use, stop using, or change autoscaling policies
- - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1beta2.types.FieldMask` - graceful_decommission_timeout (Union[dict, ~google.cloud.dataproc_v1beta2.types.Duration]): Optional. Timeout for graceful YARN decomissioning. Graceful - decommissioning allows removing nodes from the cluster without - interrupting jobs in progress. Timeout specifies how long to wait for - jobs in progress to finish before forcefully removing nodes (and - potentially interrupting jobs). Default timeout is 0 (for forceful - decommission), and the maximum allowed timeout is 1 day (see JSON - representation of - `Duration `__). - - Only supported on Dataproc image versions 1.2 and higher. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1beta2.types.Duration` - request_id (str): Optional. A unique id used to identify the request. If the server - receives two ``UpdateClusterRequest`` requests with the same id, then - the second request will be ignored and the first - ``google.longrunning.Operation`` created and stored in the backend is - returned. - - It is recommended to always set this value to a - `UUID `__. - - The id must contain only letters (a-z, A-Z), numbers (0-9), underscores - (_), and hyphens (-). The maximum length is 40 characters. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "update_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "update_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_cluster, - default_retry=self._method_configs["UpdateCluster"].retry, - default_timeout=self._method_configs["UpdateCluster"].timeout, - client_info=self._client_info, - ) - - request = clusters_pb2.UpdateClusterRequest( - project_id=project_id, - region=region, - cluster_name=cluster_name, - cluster=cluster, - update_mask=update_mask, - graceful_decommission_timeout=graceful_decommission_timeout, - request_id=request_id, - ) - operation = self._inner_api_calls["update_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - clusters_pb2.Cluster, - metadata_type=proto_operations_pb2.ClusterOperationMetadata, - ) - - def delete_cluster( - self, - project_id, - region, - cluster_name, - cluster_uuid=None, - request_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes a cluster in a project. The returned ``Operation.metadata`` - will be - `ClusterOperationMetadata `__. 
- - Example: - >>> from google.cloud import dataproc_v1beta2 - >>> - >>> client = dataproc_v1beta2.ClusterControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # TODO: Initialize `cluster_name`: - >>> cluster_name = '' - >>> - >>> response = client.delete_cluster(project_id, region, cluster_name) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - project_id (str): Required. The ID of the Google Cloud Platform project that the cluster - belongs to. - region (str): Required. The Dataproc region in which to handle the request. - cluster_name (str): Required. The cluster name. - cluster_uuid (str): Optional. Specifying the ``cluster_uuid`` means the RPC should fail - (with error NOT_FOUND) if cluster with specified UUID does not exist. - request_id (str): Optional. A unique id used to identify the request. If the server - receives two ``DeleteClusterRequest`` requests with the same id, then - the second request will be ignored and the first - ``google.longrunning.Operation`` created and stored in the backend is - returned. - - It is recommended to always set this value to a - `UUID `__. - - The id must contain only letters (a-z, A-Z), numbers (0-9), underscores - (_), and hyphens (-). The maximum length is 40 characters. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "delete_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_cluster, - default_retry=self._method_configs["DeleteCluster"].retry, - default_timeout=self._method_configs["DeleteCluster"].timeout, - client_info=self._client_info, - ) - - request = clusters_pb2.DeleteClusterRequest( - project_id=project_id, - region=region, - cluster_name=cluster_name, - cluster_uuid=cluster_uuid, - request_id=request_id, - ) - operation = self._inner_api_calls["delete_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.ClusterOperationMetadata, - ) - - def diagnose_cluster( - self, - project_id, - region, - cluster_name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets cluster diagnostic information. The returned - ``Operation.metadata`` will be - `ClusterOperationMetadata `__. 
- After the operation completes, ``Operation.response`` contains - ``Empty``. - - Example: - >>> from google.cloud import dataproc_v1beta2 - >>> - >>> client = dataproc_v1beta2.ClusterControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # TODO: Initialize `cluster_name`: - >>> cluster_name = '' - >>> - >>> response = client.diagnose_cluster(project_id, region, cluster_name) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - project_id (str): Required. The ID of the Google Cloud Platform project that the cluster - belongs to. - region (str): Required. The Dataproc region in which to handle the request. - cluster_name (str): Required. The cluster name. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "diagnose_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "diagnose_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.diagnose_cluster, - default_retry=self._method_configs["DiagnoseCluster"].retry, - default_timeout=self._method_configs["DiagnoseCluster"].timeout, - client_info=self._client_info, - ) - - request = clusters_pb2.DiagnoseClusterRequest( - project_id=project_id, region=region, cluster_name=cluster_name, - ) - operation = self._inner_api_calls["diagnose_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.ClusterOperationMetadata, - ) - - def get_cluster( - self, - project_id, - region, - cluster_name, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets the resource representation for a cluster in a project. - - Example: - >>> from google.cloud import dataproc_v1beta2 - >>> - >>> client = dataproc_v1beta2.ClusterControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # TODO: Initialize `cluster_name`: - >>> cluster_name = '' - >>> - >>> response = client.get_cluster(project_id, region, cluster_name) - - Args: - project_id (str): Required. The ID of the Google Cloud Platform project that the cluster - belongs to. - region (str): Required. The Dataproc region in which to handle the request. - cluster_name (str): Required. The cluster name. 
- retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1beta2.types.Cluster` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_cluster" not in self._inner_api_calls: - self._inner_api_calls[ - "get_cluster" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_cluster, - default_retry=self._method_configs["GetCluster"].retry, - default_timeout=self._method_configs["GetCluster"].timeout, - client_info=self._client_info, - ) - - request = clusters_pb2.GetClusterRequest( - project_id=project_id, region=region, cluster_name=cluster_name, - ) - return self._inner_api_calls["get_cluster"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_clusters( - self, - project_id, - region, - filter_=None, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists all regions/{region}/clusters in a project alphabetically. - - Example: - >>> from google.cloud import dataproc_v1beta2 - >>> - >>> client = dataproc_v1beta2.ClusterControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # Iterate over all results - >>> for element in client.list_clusters(project_id, region): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_clusters(project_id, region).pages: - ... for element in page: - ... # process element - ... pass - - Args: - project_id (str): Required. The ID of the Google Cloud Platform project that the cluster - belongs to. - region (str): Required. The Dataproc region in which to handle the request. - filter_ (str): Optional. A filter constraining the clusters to list. Filters are - case-sensitive and have the following syntax: - - field = value [AND [field = value]] ... - - where **field** is one of ``status.state``, ``clusterName``, or - ``labels.[KEY]``, and ``[KEY]`` is a label key. **value** can be ``*`` - to match all values. ``status.state`` can be one of the following: - ``ACTIVE``, ``INACTIVE``, ``CREATING``, ``RUNNING``, ``ERROR``, - ``DELETING``, or ``UPDATING``. ``ACTIVE`` contains the ``CREATING``, - ``UPDATING``, and ``RUNNING`` states. ``INACTIVE`` contains the - ``DELETING`` and ``ERROR`` states. ``clusterName`` is the name of the - cluster provided at creation time. Only the logical ``AND`` operator is - supported; space-separated items are treated as having an implicit - ``AND`` operator. 
- - Example filter: - - status.state = ACTIVE AND clusterName = mycluster AND labels.env = - staging AND labels.starred = \* - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.dataproc_v1beta2.types.Cluster` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_clusters" not in self._inner_api_calls: - self._inner_api_calls[ - "list_clusters" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_clusters, - default_retry=self._method_configs["ListClusters"].retry, - default_timeout=self._method_configs["ListClusters"].timeout, - client_info=self._client_info, - ) - - request = clusters_pb2.ListClustersRequest( - project_id=project_id, region=region, filter=filter_, page_size=page_size, - ) - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_clusters"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="clusters", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator diff --git a/google/cloud/dataproc_v1beta2/gapic/cluster_controller_client_config.py b/google/cloud/dataproc_v1beta2/gapic/cluster_controller_client_config.py deleted file mode 100644 index 43673451..00000000 --- a/google/cloud/dataproc_v1beta2/gapic/cluster_controller_client_config.py +++ /dev/null @@ -1,76 +0,0 @@ -config = { - "interfaces": { - "google.cloud.dataproc.v1beta2.ClusterController": { - "retry_codes": { - "no_retry_codes": [], - "retry_policy_3_codes": [ - "INTERNAL", - "DEADLINE_EXCEEDED", - "UNAVAILABLE", - ], - "retry_policy_2_codes": ["UNAVAILABLE"], - }, - "retry_params": { - "retry_policy_2_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 300000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 300000, - "total_timeout_millis": 300000, - }, - "retry_policy_3_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 300000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 300000, - "total_timeout_millis": 300000, - }, - "no_retry_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - 
"max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 0, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 0, - "total_timeout_millis": 0, - }, - }, - "methods": { - "CreateCluster": { - "timeout_millis": 300000, - "retry_codes_name": "retry_policy_2_codes", - "retry_params_name": "retry_policy_2_params", - }, - "UpdateCluster": { - "timeout_millis": 300000, - "retry_codes_name": "retry_policy_2_codes", - "retry_params_name": "retry_policy_2_params", - }, - "DeleteCluster": { - "timeout_millis": 300000, - "retry_codes_name": "retry_policy_2_codes", - "retry_params_name": "retry_policy_2_params", - }, - "DiagnoseCluster": { - "timeout_millis": 300000, - "retry_codes_name": "retry_policy_2_codes", - "retry_params_name": "retry_policy_2_params", - }, - "GetCluster": { - "timeout_millis": 300000, - "retry_codes_name": "retry_policy_3_codes", - "retry_params_name": "retry_policy_3_params", - }, - "ListClusters": { - "timeout_millis": 300000, - "retry_codes_name": "retry_policy_3_codes", - "retry_params_name": "retry_policy_3_params", - }, - }, - } - } -} diff --git a/google/cloud/dataproc_v1beta2/gapic/enums.py b/google/cloud/dataproc_v1beta2/gapic/enums.py deleted file mode 100644 index f9715a40..00000000 --- a/google/cloud/dataproc_v1beta2/gapic/enums.py +++ /dev/null @@ -1,318 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Wrappers for protocol buffer enum types.""" - -import enum - - -class Component(enum.IntEnum): - """ - Cluster components that can be activated. - - Attributes: - COMPONENT_UNSPECIFIED (int): Unspecified component. - ANACONDA (int): The Anaconda python distribution. - DOCKER (int): Docker - DRUID (int): The Druid query engine. - FLINK (int): Flink - HIVE_WEBHCAT (int): The Hive Web HCatalog (the REST service for accessing HCatalog). - JUPYTER (int): The Jupyter Notebook. - KERBEROS (int): The Kerberos security feature. - PRESTO (int): The Presto query engine. - RANGER (int): The Ranger service. - SOLR (int): The Solr service. - ZEPPELIN (int): The Zeppelin notebook. - ZOOKEEPER (int): The Zookeeper service. - """ - - COMPONENT_UNSPECIFIED = 0 - ANACONDA = 5 - DOCKER = 13 - DRUID = 9 - FLINK = 14 - HIVE_WEBHCAT = 3 - JUPYTER = 1 - KERBEROS = 7 - PRESTO = 6 - RANGER = 12 - SOLR = 10 - ZEPPELIN = 4 - ZOOKEEPER = 8 - - -class ClusterOperationStatus(object): - class State(enum.IntEnum): - """ - The operation state. - - Attributes: - UNKNOWN (int): Unused. - PENDING (int): The operation has been created. - RUNNING (int): The operation is running. - DONE (int): The operation is done; either cancelled or completed. - """ - - UNKNOWN = 0 - PENDING = 1 - RUNNING = 2 - DONE = 3 - - -class ClusterStatus(object): - class State(enum.IntEnum): - """ - The cluster state. - - Attributes: - UNKNOWN (int): The cluster state is unknown. - CREATING (int): The cluster is being created and set up. It is not ready for use. - RUNNING (int): The cluster is currently running and healthy. It is ready for use. 
- ERROR (int): The cluster encountered an error. It is not ready for use. - DELETING (int): The cluster is being deleted. It cannot be used. - UPDATING (int): The cluster is being updated. It continues to accept and process jobs. - STOPPING (int): The cluster is being stopped. It cannot be used. - STOPPED (int): The cluster is currently stopped. It is not ready for use. - STARTING (int): The cluster is being started. It is not ready for use. - """ - - UNKNOWN = 0 - CREATING = 1 - RUNNING = 2 - ERROR = 3 - DELETING = 4 - UPDATING = 5 - STOPPING = 6 - STOPPED = 7 - STARTING = 8 - - class Substate(enum.IntEnum): - """ - The cluster substate. - - Attributes: - UNSPECIFIED (int): The cluster substate is unknown. - UNHEALTHY (int): The cluster is known to be in an unhealthy state - (for example, critical daemons are not running or HDFS capacity is - exhausted). - - Applies to RUNNING state. - STALE_STATUS (int): The agent-reported status is out of date (may occur if - Dataproc loses communication with Agent). - - Applies to RUNNING state. - """ - - UNSPECIFIED = 0 - UNHEALTHY = 1 - STALE_STATUS = 2 - - -class JobStatus(object): - class State(enum.IntEnum): - """ - The job state. - - Attributes: - STATE_UNSPECIFIED (int): The job state is unknown. - PENDING (int): The job is pending; it has been submitted, but is not yet running. - SETUP_DONE (int): Job has been received by the service and completed initial setup; - it will soon be submitted to the cluster. - RUNNING (int): The job is running on the cluster. - CANCEL_PENDING (int): A CancelJob request has been received, but is pending. - CANCEL_STARTED (int): Transient in-flight resources have been canceled, and the request to - cancel the running job has been issued to the cluster. - CANCELLED (int): The job cancellation was successful. - DONE (int): The job has completed successfully. - ERROR (int): The job has completed, but encountered an error. - ATTEMPT_FAILURE (int): Job attempt has failed. The detail field contains failure details for - this attempt. - - Applies to restartable jobs only. - """ - - STATE_UNSPECIFIED = 0 - PENDING = 1 - SETUP_DONE = 8 - RUNNING = 2 - CANCEL_PENDING = 3 - CANCEL_STARTED = 7 - CANCELLED = 4 - DONE = 5 - ERROR = 6 - ATTEMPT_FAILURE = 9 - - class Substate(enum.IntEnum): - """ - The job substate. - - Attributes: - UNSPECIFIED (int): The job substate is unknown. - SUBMITTED (int): The Job is submitted to the agent. - - Applies to RUNNING state. - QUEUED (int): The Job has been received and is awaiting execution (it may be waiting - for a condition to be met). See the "details" field for the reason for - the delay. - - Applies to RUNNING state. - STALE_STATUS (int): The agent-reported status is out of date, which may be caused by a - loss of communication between the agent and Dataproc. If the - agent does not send a timely update, the job will fail. - - Applies to RUNNING state. - """ - - UNSPECIFIED = 0 - SUBMITTED = 1 - QUEUED = 2 - STALE_STATUS = 3 - - -class ListJobsRequest(object): - class JobStateMatcher(enum.IntEnum): - """ - A matcher that specifies categories of job states. - - Attributes: - ALL (int): Match all jobs, regardless of state. - ACTIVE (int): Only match jobs in non-terminal states: PENDING, RUNNING, or - CANCEL_PENDING. - NON_ACTIVE (int): Only match jobs in terminal states: CANCELLED, DONE, or ERROR. - """ - - ALL = 0 - ACTIVE = 1 - NON_ACTIVE = 2 - - -class LoggingConfig(object): - class Level(enum.IntEnum): - """ - The Log4j level for job execution. 
When running an `Apache - Hive `__ job, Cloud Dataproc configures the - Hive client to an equivalent verbosity level. - - Attributes: - LEVEL_UNSPECIFIED (int): Level is unspecified. Use default level for log4j. - ALL (int): Use ALL level for log4j. - TRACE (int): Use TRACE level for log4j. - DEBUG (int): Use DEBUG level for log4j. - INFO (int): Use INFO level for log4j. - WARN (int): Use WARN level for log4j. - ERROR (int): Use ERROR level for log4j. - FATAL (int): Use FATAL level for log4j. - OFF (int): Turn off log4j. - """ - - LEVEL_UNSPECIFIED = 0 - ALL = 1 - TRACE = 2 - DEBUG = 3 - INFO = 4 - WARN = 5 - ERROR = 6 - FATAL = 7 - OFF = 8 - - -class ReservationAffinity(object): - class Type(enum.IntEnum): - """ - Indicates whether to consume capacity from an reservation or not. - - Attributes: - TYPE_UNSPECIFIED (int) - NO_RESERVATION (int): Do not consume from any allocated capacity. - ANY_RESERVATION (int): Consume any reservation available. - SPECIFIC_RESERVATION (int): Must consume from a specific reservation. Must specify key value fields - for specifying the reservations. - """ - - TYPE_UNSPECIFIED = 0 - NO_RESERVATION = 1 - ANY_RESERVATION = 2 - SPECIFIC_RESERVATION = 3 - - -class WorkflowMetadata(object): - class State(enum.IntEnum): - """ - The operation state. - - Attributes: - UNKNOWN (int): Unused. - PENDING (int): The operation has been created. - RUNNING (int): The operation is running. - DONE (int): The operation is done; either cancelled or completed. - """ - - UNKNOWN = 0 - PENDING = 1 - RUNNING = 2 - DONE = 3 - - -class WorkflowNode(object): - class NodeState(enum.IntEnum): - """ - The workflow node state. - - Attributes: - NODE_STATUS_UNSPECIFIED (int): State is unspecified. - BLOCKED (int): The node is awaiting prerequisite node to finish. - RUNNABLE (int): The node is runnable but not running. - RUNNING (int): The node is running. - COMPLETED (int): The node completed successfully. - FAILED (int): The node failed. A node can be marked FAILED because - its ancestor or peer failed. - """ - - NODE_STATUS_UNSPECIFIED = 0 - BLOCKED = 1 - RUNNABLE = 2 - RUNNING = 3 - COMPLETED = 4 - FAILED = 5 - - -class YarnApplication(object): - class State(enum.IntEnum): - """ - The application state, corresponding to - YarnProtos.YarnApplicationStateProto. - - Attributes: - STATE_UNSPECIFIED (int): Status is unspecified. - NEW (int): Status is NEW. - NEW_SAVING (int): Status is NEW_SAVING. - SUBMITTED (int): Status is SUBMITTED. - ACCEPTED (int): Status is ACCEPTED. - RUNNING (int): Status is RUNNING. - FINISHED (int): Status is FINISHED. - FAILED (int): Status is FAILED. - KILLED (int): Status is KILLED. - """ - - STATE_UNSPECIFIED = 0 - NEW = 1 - NEW_SAVING = 2 - SUBMITTED = 3 - ACCEPTED = 4 - RUNNING = 5 - FINISHED = 6 - FAILED = 7 - KILLED = 8 diff --git a/google/cloud/dataproc_v1beta2/gapic/job_controller_client.py b/google/cloud/dataproc_v1beta2/gapic/job_controller_client.py deleted file mode 100644 index f3f12304..00000000 --- a/google/cloud/dataproc_v1beta2/gapic/job_controller_client.py +++ /dev/null @@ -1,810 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Accesses the google.cloud.dataproc.v1beta2 JobController API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.grpc_helpers -import google.api_core.operation -import google.api_core.operations_v1 -import google.api_core.page_iterator -import grpc - -from google.cloud.dataproc_v1beta2.gapic import enums -from google.cloud.dataproc_v1beta2.gapic import job_controller_client_config -from google.cloud.dataproc_v1beta2.gapic.transports import job_controller_grpc_transport -from google.cloud.dataproc_v1beta2.proto import autoscaling_policies_pb2 -from google.cloud.dataproc_v1beta2.proto import autoscaling_policies_pb2_grpc -from google.cloud.dataproc_v1beta2.proto import clusters_pb2 -from google.cloud.dataproc_v1beta2.proto import clusters_pb2_grpc -from google.cloud.dataproc_v1beta2.proto import jobs_pb2 -from google.cloud.dataproc_v1beta2.proto import jobs_pb2_grpc -from google.cloud.dataproc_v1beta2.proto import operations_pb2 as proto_operations_pb2 -from google.longrunning import operations_pb2 as longrunning_operations_pb2 -from google.protobuf import duration_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-dataproc", -).version - - -class JobControllerClient(object): - """The JobController provides methods to manage jobs.""" - - SERVICE_ADDRESS = "dataproc.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.cloud.dataproc.v1beta2.JobController" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - JobControllerClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.JobControllerGrpcTransport, - Callable[[~.Credentials, type], ~.JobControllerGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. 
Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. - if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = job_controller_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=job_controller_grpc_transport.JobControllerGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = job_controller_grpc_transport.JobControllerGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials, - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME], - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. 
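Taken together, the constructor options documented above were usually exercised like this on the surface being removed; a minimal sketch, where the key path and the regional endpoint are illustrative values rather than defaults::

    from google.cloud import dataproc_v1beta2

    # from_service_account_file() loads the key and forwards the remaining
    # keyword arguments to the constructor described above.
    client = dataproc_v1beta2.JobControllerClient.from_service_account_file(
        "service-account.json",
        client_options={"api_endpoint": "us-central1-dataproc.googleapis.com:443"},
    )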
- self._inner_api_calls = {} - - # Service calls - def submit_job( - self, - project_id, - region, - job, - request_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Submits a job to a cluster. - - Example: - >>> from google.cloud import dataproc_v1beta2 - >>> - >>> client = dataproc_v1beta2.JobControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # TODO: Initialize `job`: - >>> job = {} - >>> - >>> response = client.submit_job(project_id, region, job) - - Args: - project_id (str): Required. The ID of the Google Cloud Platform project that the job - belongs to. - region (str): Required. The Dataproc region in which to handle the request. - job (Union[dict, ~google.cloud.dataproc_v1beta2.types.Job]): Required. The job resource. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1beta2.types.Job` - request_id (str): Optional. A unique id used to identify the request. If the server - receives two ``SubmitJobRequest`` requests with the same id, then the - second request will be ignored and the first ``Job`` created and stored - in the backend is returned. - - It is recommended to always set this value to a - `UUID `__. - - The id must contain only letters (a-z, A-Z), numbers (0-9), underscores - (_), and hyphens (-). The maximum length is 40 characters. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1beta2.types.Job` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "submit_job" not in self._inner_api_calls: - self._inner_api_calls[ - "submit_job" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.submit_job, - default_retry=self._method_configs["SubmitJob"].retry, - default_timeout=self._method_configs["SubmitJob"].timeout, - client_info=self._client_info, - ) - - request = jobs_pb2.SubmitJobRequest( - project_id=project_id, region=region, job=job, request_id=request_id, - ) - return self._inner_api_calls["submit_job"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def submit_job_as_operation( - self, - project_id, - region, - job, - request_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Submits job to a cluster. 
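The ``request_id`` behaviour documented above is the usual way to make submissions idempotent; a sketch under the assumption of placeholder project, region, cluster and bucket names::

    import uuid

    from google.cloud import dataproc_v1beta2

    client = dataproc_v1beta2.JobControllerClient()

    # Placeholder job: run a PySpark script staged in a GCS bucket on an
    # existing cluster.
    job = {
        "placement": {"cluster_name": "example-cluster"},
        "pyspark_job": {"main_python_file_uri": "gs://example-bucket/wordcount.py"},
    }

    # A fresh UUID per logical submission: a retried SubmitJobRequest carrying
    # the same id returns the Job created by the first attempt instead of
    # creating a duplicate.
    response = client.submit_job(
        "example-project", "us-central1", job, request_id=str(uuid.uuid4())
    )
    print(response.reference.job_id)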
- - Example: - >>> from google.cloud import dataproc_v1beta2 - >>> - >>> client = dataproc_v1beta2.JobControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # TODO: Initialize `job`: - >>> job = {} - >>> - >>> response = client.submit_job_as_operation(project_id, region, job) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - project_id (str): Required. The ID of the Google Cloud Platform project that the job - belongs to. - region (str): Required. The Dataproc region in which to handle the request. - job (Union[dict, ~google.cloud.dataproc_v1beta2.types.Job]): Required. The job resource. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1beta2.types.Job` - request_id (str): Optional. A unique id used to identify the request. If the server - receives two ``SubmitJobRequest`` requests with the same id, then the - second request will be ignored and the first ``Job`` created and stored - in the backend is returned. - - It is recommended to always set this value to a - `UUID `__. - - The id must contain only letters (a-z, A-Z), numbers (0-9), underscores - (_), and hyphens (-). The maximum length is 40 characters. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "submit_job_as_operation" not in self._inner_api_calls: - self._inner_api_calls[ - "submit_job_as_operation" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.submit_job_as_operation, - default_retry=self._method_configs["SubmitJobAsOperation"].retry, - default_timeout=self._method_configs["SubmitJobAsOperation"].timeout, - client_info=self._client_info, - ) - - request = jobs_pb2.SubmitJobRequest( - project_id=project_id, region=region, job=job, request_id=request_id, - ) - operation = self._inner_api_calls["submit_job_as_operation"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - jobs_pb2.Job, - metadata_type=jobs_pb2.JobMetadata, - ) - - def get_job( - self, - project_id, - region, - job_id, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Gets the resource representation for a job in a project. 
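Besides the callback style shown in the docstring above, the returned future can simply be blocked on; a sketch with the same placeholder names, assuming the Hadoop examples jar that ships on Dataproc images::

    from google.cloud import dataproc_v1beta2

    client = dataproc_v1beta2.JobControllerClient()

    job = {
        "placement": {"cluster_name": "example-cluster"},
        "hadoop_job": {
            "main_jar_file_uri": "file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar",
            "args": ["pi", "10", "100"],
        },
    }

    operation = client.submit_job_as_operation("example-project", "us-central1", job)

    # result() blocks until the long-running operation resolves and returns the
    # finished Job; operation.metadata carries JobMetadata while it runs.
    finished = operation.result(timeout=600)
    print(finished.status.state)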
- - Example: - >>> from google.cloud import dataproc_v1beta2 - >>> - >>> client = dataproc_v1beta2.JobControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # TODO: Initialize `job_id`: - >>> job_id = '' - >>> - >>> response = client.get_job(project_id, region, job_id) - - Args: - project_id (str): Required. The ID of the Google Cloud Platform project that the job - belongs to. - region (str): Required. The Dataproc region in which to handle the request. - job_id (str): Required. The job ID. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1beta2.types.Job` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_job" not in self._inner_api_calls: - self._inner_api_calls[ - "get_job" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_job, - default_retry=self._method_configs["GetJob"].retry, - default_timeout=self._method_configs["GetJob"].timeout, - client_info=self._client_info, - ) - - request = jobs_pb2.GetJobRequest( - project_id=project_id, region=region, job_id=job_id, - ) - return self._inner_api_calls["get_job"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_jobs( - self, - project_id, - region, - page_size=None, - cluster_name=None, - job_state_matcher=None, - filter_=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists regions/{region}/jobs in a project. - - Example: - >>> from google.cloud import dataproc_v1beta2 - >>> - >>> client = dataproc_v1beta2.JobControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # Iterate over all results - >>> for element in client.list_jobs(project_id, region): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_jobs(project_id, region).pages: - ... for element in page: - ... # process element - ... pass - - Args: - project_id (str): Required. The ID of the Google Cloud Platform project that the job - belongs to. - region (str): Required. The Dataproc region in which to handle the request. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - cluster_name (str): Optional. If set, the returned jobs list includes only jobs that were - submitted to the named cluster. 
- job_state_matcher (~google.cloud.dataproc_v1beta2.types.JobStateMatcher): Optional. Specifies enumerated categories of jobs to list. (default - = match ALL jobs). - - If ``filter`` is provided, ``jobStateMatcher`` will be ignored. - filter_ (str): Optional. A filter constraining the jobs to list. Filters are - case-sensitive and have the following syntax: - - [field = value] AND [field [= value]] ... - - where **field** is ``status.state`` or ``labels.[KEY]``, and ``[KEY]`` - is a label key. **value** can be ``*`` to match all values. - ``status.state`` can be either ``ACTIVE`` or ``NON_ACTIVE``. Only the - logical ``AND`` operator is supported; space-separated items are treated - as having an implicit ``AND`` operator. - - Example filter: - - status.state = ACTIVE AND labels.env = staging AND labels.starred = \* - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.dataproc_v1beta2.types.Job` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_jobs" not in self._inner_api_calls: - self._inner_api_calls[ - "list_jobs" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_jobs, - default_retry=self._method_configs["ListJobs"].retry, - default_timeout=self._method_configs["ListJobs"].timeout, - client_info=self._client_info, - ) - - request = jobs_pb2.ListJobsRequest( - project_id=project_id, - region=region, - page_size=page_size, - cluster_name=cluster_name, - job_state_matcher=job_state_matcher, - filter=filter_, - ) - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_jobs"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="jobs", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def update_job( - self, - project_id, - region, - job_id, - job, - update_mask, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates a job in a project. - - Example: - >>> from google.cloud import dataproc_v1beta2 - >>> - >>> client = dataproc_v1beta2.JobControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # TODO: Initialize `job_id`: - >>> job_id = '' - >>> - >>> # TODO: Initialize `job`: - >>> job = {} - >>> - >>> # TODO: Initialize `update_mask`: - >>> update_mask = {} - >>> - >>> response = client.update_job(project_id, region, job_id, job, update_mask) - - Args: - project_id (str): Required. 
The ID of the Google Cloud Platform project that the job - belongs to. - region (str): Required. The Dataproc region in which to handle the request. - job_id (str): Required. The job ID. - job (Union[dict, ~google.cloud.dataproc_v1beta2.types.Job]): Required. The changes to the job. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1beta2.types.Job` - update_mask (Union[dict, ~google.cloud.dataproc_v1beta2.types.FieldMask]): Required. Specifies the path, relative to Job, of the field to - update. For example, to update the labels of a Job the update_mask - parameter would be specified as labels, and the ``PATCH`` request body - would specify the new value. Note: Currently, labels is the only field - that can be updated. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1beta2.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1beta2.types.Job` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "update_job" not in self._inner_api_calls: - self._inner_api_calls[ - "update_job" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_job, - default_retry=self._method_configs["UpdateJob"].retry, - default_timeout=self._method_configs["UpdateJob"].timeout, - client_info=self._client_info, - ) - - request = jobs_pb2.UpdateJobRequest( - project_id=project_id, - region=region, - job_id=job_id, - job=job, - update_mask=update_mask, - ) - return self._inner_api_calls["update_job"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def cancel_job( - self, - project_id, - region, - job_id, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Starts a job cancellation request. To access the job resource after - cancellation, call - `regions/{region}/jobs.list `__ - or - `regions/{region}/jobs.get `__. - - Example: - >>> from google.cloud import dataproc_v1beta2 - >>> - >>> client = dataproc_v1beta2.JobControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # TODO: Initialize `job_id`: - >>> job_id = '' - >>> - >>> response = client.cancel_job(project_id, region, job_id) - - Args: - project_id (str): Required. The ID of the Google Cloud Platform project that the job - belongs to. - region (str): Required. The Dataproc region in which to handle the request. - job_id (str): Required. The job ID. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. 
- timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1beta2.types.Job` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "cancel_job" not in self._inner_api_calls: - self._inner_api_calls[ - "cancel_job" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.cancel_job, - default_retry=self._method_configs["CancelJob"].retry, - default_timeout=self._method_configs["CancelJob"].timeout, - client_info=self._client_info, - ) - - request = jobs_pb2.CancelJobRequest( - project_id=project_id, region=region, job_id=job_id, - ) - return self._inner_api_calls["cancel_job"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def delete_job( - self, - project_id, - region, - job_id, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes the job from the project. If the job is active, the delete - fails, and the response returns ``FAILED_PRECONDITION``. - - Example: - >>> from google.cloud import dataproc_v1beta2 - >>> - >>> client = dataproc_v1beta2.JobControllerClient() - >>> - >>> # TODO: Initialize `project_id`: - >>> project_id = '' - >>> - >>> # TODO: Initialize `region`: - >>> region = '' - >>> - >>> # TODO: Initialize `job_id`: - >>> job_id = '' - >>> - >>> client.delete_job(project_id, region, job_id) - - Args: - project_id (str): Required. The ID of the Google Cloud Platform project that the job - belongs to. - region (str): Required. The Dataproc region in which to handle the request. - job_id (str): Required. The job ID. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
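The ``retry`` and ``timeout`` parameters accepted by every call above are plain ``google.api_core`` primitives, so the config-driven defaults can be overridden per call; a sketch with placeholder identifiers::

    from google.api_core import exceptions
    from google.api_core import retry as retries
    from google.cloud import dataproc_v1beta2

    client = dataproc_v1beta2.JobControllerClient()

    # Retry only on UNAVAILABLE, backing off from 0.1s towards 60s and giving
    # up after five minutes overall.
    custom_retry = retries.Retry(
        predicate=retries.if_exception_type(exceptions.ServiceUnavailable),
        initial=0.1,
        maximum=60.0,
        multiplier=1.3,
        deadline=300.0,
    )

    job = client.get_job(
        "example-project", "us-central1", "example-job-id",
        retry=custom_retry, timeout=90.0,
    )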
- if "delete_job" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_job" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_job, - default_retry=self._method_configs["DeleteJob"].retry, - default_timeout=self._method_configs["DeleteJob"].timeout, - client_info=self._client_info, - ) - - request = jobs_pb2.DeleteJobRequest( - project_id=project_id, region=region, job_id=job_id, - ) - self._inner_api_calls["delete_job"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) diff --git a/google/cloud/dataproc_v1beta2/gapic/job_controller_client_config.py b/google/cloud/dataproc_v1beta2/gapic/job_controller_client_config.py deleted file mode 100644 index 75561150..00000000 --- a/google/cloud/dataproc_v1beta2/gapic/job_controller_client_config.py +++ /dev/null @@ -1,149 +0,0 @@ -config = { - "interfaces": { - "google.cloud.dataproc.v1beta2.JobController": { - "retry_codes": { - "retry_policy_1_codes": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "retry_policy_4_codes": ["UNAVAILABLE"], - "retry_policy_6_codes": ["UNAVAILABLE"], - "no_retry_codes": [], - "retry_policy_3_codes": [ - "INTERNAL", - "DEADLINE_EXCEEDED", - "UNAVAILABLE", - ], - "retry_policy_2_codes": ["UNAVAILABLE"], - "no_retry_1_codes": [], - "retry_policy_5_codes": [ - "DEADLINE_EXCEEDED", - "INTERNAL", - "UNAVAILABLE", - ], - "retry_policy_7_codes": [ - "DEADLINE_EXCEEDED", - "INTERNAL", - "UNAVAILABLE", - ], - }, - "retry_params": { - "retry_policy_1_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 600000, - "total_timeout_millis": 600000, - }, - "retry_policy_6_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 600000, - "total_timeout_millis": 600000, - }, - "retry_policy_2_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 300000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 300000, - "total_timeout_millis": 300000, - }, - "retry_policy_3_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 300000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 300000, - "total_timeout_millis": 300000, - }, - "retry_policy_7_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 600000, - "total_timeout_millis": 600000, - }, - "retry_policy_5_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 900000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 900000, - "total_timeout_millis": 900000, - }, - "retry_policy_4_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 900000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 900000, - "total_timeout_millis": 900000, - }, - "no_retry_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 
0, - "initial_rpc_timeout_millis": 0, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 0, - "total_timeout_millis": 0, - }, - "no_retry_1_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 600000, - "total_timeout_millis": 600000, - }, - }, - "methods": { - "SubmitJob": { - "timeout_millis": 900000, - "retry_codes_name": "retry_policy_4_codes", - "retry_params_name": "retry_policy_4_params", - }, - "SubmitJobAsOperation": { - "timeout_millis": 900000, - "retry_codes_name": "retry_policy_4_codes", - "retry_params_name": "retry_policy_4_params", - }, - "GetJob": { - "timeout_millis": 900000, - "retry_codes_name": "retry_policy_5_codes", - "retry_params_name": "retry_policy_5_params", - }, - "ListJobs": { - "timeout_millis": 900000, - "retry_codes_name": "retry_policy_5_codes", - "retry_params_name": "retry_policy_5_params", - }, - "UpdateJob": { - "timeout_millis": 900000, - "retry_codes_name": "retry_policy_4_codes", - "retry_params_name": "retry_policy_4_params", - }, - "CancelJob": { - "timeout_millis": 900000, - "retry_codes_name": "retry_policy_5_codes", - "retry_params_name": "retry_policy_5_params", - }, - "DeleteJob": { - "timeout_millis": 900000, - "retry_codes_name": "retry_policy_4_codes", - "retry_params_name": "retry_policy_4_params", - }, - }, - } - } -} diff --git a/google/cloud/dataproc_v1beta2/gapic/transports/__init__.py b/google/cloud/dataproc_v1beta2/gapic/transports/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/google/cloud/dataproc_v1beta2/gapic/transports/autoscaling_policy_service_grpc_transport.py b/google/cloud/dataproc_v1beta2/gapic/transports/autoscaling_policy_service_grpc_transport.py deleted file mode 100644 index 1e41f971..00000000 --- a/google/cloud/dataproc_v1beta2/gapic/transports/autoscaling_policy_service_grpc_transport.py +++ /dev/null @@ -1,179 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers - -from google.cloud.dataproc_v1beta2.proto import autoscaling_policies_pb2_grpc - - -class AutoscalingPolicyServiceGrpcTransport(object): - """gRPC transport class providing stubs for - google.cloud.dataproc.v1beta2 AutoscalingPolicyService API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) - - def __init__( - self, channel=None, credentials=None, address="dataproc.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. 
- credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive.", - ) - - # Create the channel. - if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "autoscaling_policy_service_stub": autoscaling_policies_pb2_grpc.AutoscalingPolicyServiceStub( - channel - ), - } - - @classmethod - def create_channel( - cls, address="dataproc.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def create_autoscaling_policy(self): - """Return the gRPC stub for :meth:`AutoscalingPolicyServiceClient.create_autoscaling_policy`. - - Creates new autoscaling policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["autoscaling_policy_service_stub"].CreateAutoscalingPolicy - - @property - def update_autoscaling_policy(self): - """Return the gRPC stub for :meth:`AutoscalingPolicyServiceClient.update_autoscaling_policy`. - - Updates (replaces) autoscaling policy. - - Disabled check for update_mask, because all updates will be full - replacements. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["autoscaling_policy_service_stub"].UpdateAutoscalingPolicy - - @property - def get_autoscaling_policy(self): - """Return the gRPC stub for :meth:`AutoscalingPolicyServiceClient.get_autoscaling_policy`. - - Retrieves autoscaling policy. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["autoscaling_policy_service_stub"].GetAutoscalingPolicy - - @property - def list_autoscaling_policies(self): - """Return the gRPC stub for :meth:`AutoscalingPolicyServiceClient.list_autoscaling_policies`. - - Lists autoscaling policies in the project. 
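For callers that needed the raw stubs, the transport could be built around an explicitly created channel; ``create_channel()`` applies the cloud-platform scope and picks up credentials from the environment when none are passed::

    from google.cloud.dataproc_v1beta2.gapic.transports import (
        autoscaling_policy_service_grpc_transport as transports,
    )

    channel = transports.AutoscalingPolicyServiceGrpcTransport.create_channel(
        address="dataproc.googleapis.com:443",
    )
    transport = transports.AutoscalingPolicyServiceGrpcTransport(channel=channel)

    # The properties above expose the bound gRPC callables directly.
    list_policies = transport.list_autoscaling_policies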
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["autoscaling_policy_service_stub"].ListAutoscalingPolicies - - @property - def delete_autoscaling_policy(self): - """Return the gRPC stub for :meth:`AutoscalingPolicyServiceClient.delete_autoscaling_policy`. - - Deletes an autoscaling policy. It is an error to delete an autoscaling - policy that is in use by one or more clusters. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["autoscaling_policy_service_stub"].DeleteAutoscalingPolicy diff --git a/google/cloud/dataproc_v1beta2/gapic/transports/cluster_controller_grpc_transport.py b/google/cloud/dataproc_v1beta2/gapic/transports/cluster_controller_grpc_transport.py deleted file mode 100644 index c8bbc15c..00000000 --- a/google/cloud/dataproc_v1beta2/gapic/transports/cluster_controller_grpc_transport.py +++ /dev/null @@ -1,204 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers -import google.api_core.operations_v1 - -from google.cloud.dataproc_v1beta2.proto import clusters_pb2_grpc - - -class ClusterControllerGrpcTransport(object): - """gRPC transport class providing stubs for - google.cloud.dataproc.v1beta2 ClusterController API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) - - def __init__( - self, channel=None, credentials=None, address="dataproc.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive.", - ) - - # Create the channel. 
- if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "cluster_controller_stub": clusters_pb2_grpc.ClusterControllerStub(channel), - } - - # Because this API includes a method that returns a - # long-running operation (proto: google.longrunning.Operation), - # instantiate an LRO client. - self._operations_client = google.api_core.operations_v1.OperationsClient( - channel - ) - - @classmethod - def create_channel( - cls, address="dataproc.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def create_cluster(self): - """Return the gRPC stub for :meth:`ClusterControllerClient.create_cluster`. - - Creates a cluster in a project. The returned ``Operation.metadata`` - will be - `ClusterOperationMetadata `__. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_controller_stub"].CreateCluster - - @property - def update_cluster(self): - """Return the gRPC stub for :meth:`ClusterControllerClient.update_cluster`. - - Updates a cluster in a project. The returned ``Operation.metadata`` - will be - `ClusterOperationMetadata `__. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_controller_stub"].UpdateCluster - - @property - def delete_cluster(self): - """Return the gRPC stub for :meth:`ClusterControllerClient.delete_cluster`. - - Deletes a cluster in a project. The returned ``Operation.metadata`` - will be - `ClusterOperationMetadata `__. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_controller_stub"].DeleteCluster - - @property - def diagnose_cluster(self): - """Return the gRPC stub for :meth:`ClusterControllerClient.diagnose_cluster`. - - Gets cluster diagnostic information. The returned - ``Operation.metadata`` will be - `ClusterOperationMetadata `__. - After the operation completes, ``Operation.response`` contains - ``Empty``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. 
- """ - return self._stubs["cluster_controller_stub"].DiagnoseCluster - - @property - def get_cluster(self): - """Return the gRPC stub for :meth:`ClusterControllerClient.get_cluster`. - - Gets the resource representation for a cluster in a project. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_controller_stub"].GetCluster - - @property - def list_clusters(self): - """Return the gRPC stub for :meth:`ClusterControllerClient.list_clusters`. - - Lists all regions/{region}/clusters in a project alphabetically. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["cluster_controller_stub"].ListClusters diff --git a/google/cloud/dataproc_v1beta2/gapic/transports/job_controller_grpc_transport.py b/google/cloud/dataproc_v1beta2/gapic/transports/job_controller_grpc_transport.py deleted file mode 100644 index 8b941307..00000000 --- a/google/cloud/dataproc_v1beta2/gapic/transports/job_controller_grpc_transport.py +++ /dev/null @@ -1,212 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers -import google.api_core.operations_v1 - -from google.cloud.dataproc_v1beta2.proto import jobs_pb2_grpc - - -class JobControllerGrpcTransport(object): - """gRPC transport class providing stubs for - google.cloud.dataproc.v1beta2 JobController API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) - - def __init__( - self, channel=None, credentials=None, address="dataproc.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive.", - ) - - # Create the channel. 
- if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "job_controller_stub": jobs_pb2_grpc.JobControllerStub(channel), - } - - # Because this API includes a method that returns a - # long-running operation (proto: google.longrunning.Operation), - # instantiate an LRO client. - self._operations_client = google.api_core.operations_v1.OperationsClient( - channel - ) - - @classmethod - def create_channel( - cls, address="dataproc.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def submit_job(self): - """Return the gRPC stub for :meth:`JobControllerClient.submit_job`. - - Submits a job to a cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["job_controller_stub"].SubmitJob - - @property - def submit_job_as_operation(self): - """Return the gRPC stub for :meth:`JobControllerClient.submit_job_as_operation`. - - Submits job to a cluster. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["job_controller_stub"].SubmitJobAsOperation - - @property - def get_job(self): - """Return the gRPC stub for :meth:`JobControllerClient.get_job`. - - Gets the resource representation for a job in a project. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["job_controller_stub"].GetJob - - @property - def list_jobs(self): - """Return the gRPC stub for :meth:`JobControllerClient.list_jobs`. - - Lists regions/{region}/jobs in a project. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["job_controller_stub"].ListJobs - - @property - def update_job(self): - """Return the gRPC stub for :meth:`JobControllerClient.update_job`. - - Updates a job in a project. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["job_controller_stub"].UpdateJob - - @property - def cancel_job(self): - """Return the gRPC stub for :meth:`JobControllerClient.cancel_job`. - - Starts a job cancellation request. 
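The same pattern applied to the job transport; the options below mirror the unlimited message sizes the transport sets on its default channel, with keepalive added purely as an illustrative extra, and the resulting transport can be handed to the client constructor shown earlier::

    from google.cloud import dataproc_v1beta2
    from google.cloud.dataproc_v1beta2.gapic.transports import (
        job_controller_grpc_transport as transports,
    )

    channel = transports.JobControllerGrpcTransport.create_channel(
        address="dataproc.googleapis.com:443",
        options=[
            ("grpc.max_send_message_length", -1),
            ("grpc.max_receive_message_length", -1),
            ("grpc.keepalive_time_ms", 30000),  # illustrative extra knob
        ],
    )
    transport = transports.JobControllerGrpcTransport(channel=channel)
    client = dataproc_v1beta2.JobControllerClient(transport=transport)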
To access the job resource after - cancellation, call - `regions/{region}/jobs.list `__ - or - `regions/{region}/jobs.get `__. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["job_controller_stub"].CancelJob - - @property - def delete_job(self): - """Return the gRPC stub for :meth:`JobControllerClient.delete_job`. - - Deletes the job from the project. If the job is active, the delete - fails, and the response returns ``FAILED_PRECONDITION``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["job_controller_stub"].DeleteJob diff --git a/google/cloud/dataproc_v1beta2/gapic/transports/workflow_template_service_grpc_transport.py b/google/cloud/dataproc_v1beta2/gapic/transports/workflow_template_service_grpc_transport.py deleted file mode 100644 index d2738246..00000000 --- a/google/cloud/dataproc_v1beta2/gapic/transports/workflow_template_service_grpc_transport.py +++ /dev/null @@ -1,249 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import google.api_core.grpc_helpers -import google.api_core.operations_v1 - -from google.cloud.dataproc_v1beta2.proto import workflow_templates_pb2_grpc - - -class WorkflowTemplateServiceGrpcTransport(object): - """gRPC transport class providing stubs for - google.cloud.dataproc.v1beta2 WorkflowTemplateService API. - - The transport provides access to the raw gRPC stubs, - which can be used to take advantage of advanced - features of gRPC. - """ - - # The scopes needed to make gRPC calls to all of the methods defined - # in this service. - _OAUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) - - def __init__( - self, channel=None, credentials=None, address="dataproc.googleapis.com:443" - ): - """Instantiate the transport class. - - Args: - channel (grpc.Channel): A ``Channel`` instance through - which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - address (str): The address where the service is hosted. - """ - # If both `channel` and `credentials` are specified, raise an - # exception (channels come with credentials baked in already). - if channel is not None and credentials is not None: - raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive.", - ) - - # Create the channel. 
- if channel is None: - channel = self.create_channel( - address=address, - credentials=credentials, - options={ - "grpc.max_send_message_length": -1, - "grpc.max_receive_message_length": -1, - }.items(), - ) - - self._channel = channel - - # gRPC uses objects called "stubs" that are bound to the - # channel and provide a basic method for each RPC. - self._stubs = { - "workflow_template_service_stub": workflow_templates_pb2_grpc.WorkflowTemplateServiceStub( - channel - ), - } - - # Because this API includes a method that returns a - # long-running operation (proto: google.longrunning.Operation), - # instantiate an LRO client. - self._operations_client = google.api_core.operations_v1.OperationsClient( - channel - ) - - @classmethod - def create_channel( - cls, address="dataproc.googleapis.com:443", credentials=None, **kwargs - ): - """Create and return a gRPC channel object. - - Args: - address (str): The host for the channel to use. - credentials (~.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - kwargs (dict): Keyword arguments, which are passed to the - channel creation. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return google.api_core.grpc_helpers.create_channel( - address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs - ) - - @property - def channel(self): - """The gRPC channel used by the transport. - - Returns: - grpc.Channel: A gRPC channel object. - """ - return self._channel - - @property - def instantiate_workflow_template(self): - """Return the gRPC stub for :meth:`WorkflowTemplateServiceClient.instantiate_workflow_template`. - - Instantiates a template and begins execution. - - The returned Operation can be used to track execution of workflow by - polling ``operations.get``. The Operation will complete when entire - workflow is finished. - - The running workflow can be aborted via ``operations.cancel``. This will - cause any inflight jobs to be cancelled and workflow-owned clusters to - be deleted. - - The ``Operation.metadata`` will be - `WorkflowMetadata `__. - Also see `Using - WorkflowMetadata `__. - - On successful completion, ``Operation.response`` will be ``Empty``. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["workflow_template_service_stub"].InstantiateWorkflowTemplate - - @property - def instantiate_inline_workflow_template(self): - """Return the gRPC stub for :meth:`WorkflowTemplateServiceClient.instantiate_inline_workflow_template`. - - Instantiates a template and begins execution. - - This method is equivalent to executing the sequence - ``CreateWorkflowTemplate``, ``InstantiateWorkflowTemplate``, - ``DeleteWorkflowTemplate``. - - The returned Operation can be used to track execution of workflow by - polling ``operations.get``. The Operation will complete when entire - workflow is finished. - - The running workflow can be aborted via ``operations.cancel``. This will - cause any inflight jobs to be cancelled and workflow-owned clusters to - be deleted. - - The ``Operation.metadata`` will be - `WorkflowMetadata `__. - Also see `Using - WorkflowMetadata `__. - - On successful completion, ``Operation.response`` will be ``Empty``. 
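Assuming the ``instantiate_workflow_template`` method of the client whose deletion starts below (it wrapped the stub documented here), a workflow run was typically tracked through the returned operation; names are placeholders::

    from google.cloud import dataproc_v1beta2

    client = dataproc_v1beta2.WorkflowTemplateServiceClient()

    name = client.workflow_template_path(
        "example-project", "us-central1", "example-template"
    )
    operation = client.instantiate_workflow_template(name)

    # operation.metadata carries WorkflowMetadata while the workflow runs;
    # result() returns Empty once the whole workflow has finished.
    operation.result(timeout=1800)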
- - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs[ - "workflow_template_service_stub" - ].InstantiateInlineWorkflowTemplate - - @property - def create_workflow_template(self): - """Return the gRPC stub for :meth:`WorkflowTemplateServiceClient.create_workflow_template`. - - Creates new workflow template. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["workflow_template_service_stub"].CreateWorkflowTemplate - - @property - def get_workflow_template(self): - """Return the gRPC stub for :meth:`WorkflowTemplateServiceClient.get_workflow_template`. - - Retrieves the latest workflow template. - - Can retrieve previously instantiated template by specifying optional - version parameter. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["workflow_template_service_stub"].GetWorkflowTemplate - - @property - def update_workflow_template(self): - """Return the gRPC stub for :meth:`WorkflowTemplateServiceClient.update_workflow_template`. - - Updates (replaces) workflow template. The updated template - must contain version that matches the current server version. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["workflow_template_service_stub"].UpdateWorkflowTemplate - - @property - def list_workflow_templates(self): - """Return the gRPC stub for :meth:`WorkflowTemplateServiceClient.list_workflow_templates`. - - Lists workflows that match the specified filter in the request. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["workflow_template_service_stub"].ListWorkflowTemplates - - @property - def delete_workflow_template(self): - """Return the gRPC stub for :meth:`WorkflowTemplateServiceClient.delete_workflow_template`. - - Deletes a workflow template. It does not cancel in-progress workflows. - - Returns: - Callable: A callable which accepts the appropriate - deserialized request object and returns a - deserialized response object. - """ - return self._stubs["workflow_template_service_stub"].DeleteWorkflowTemplate diff --git a/google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client.py b/google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client.py deleted file mode 100644 index b77b32e5..00000000 --- a/google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client.py +++ /dev/null @@ -1,960 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Accesses the google.cloud.dataproc.v1beta2 WorkflowTemplateService API.""" - -import functools -import pkg_resources -import warnings - -from google.oauth2 import service_account -import google.api_core.client_options -import google.api_core.gapic_v1.client_info -import google.api_core.gapic_v1.config -import google.api_core.gapic_v1.method -import google.api_core.gapic_v1.routing_header -import google.api_core.grpc_helpers -import google.api_core.operation -import google.api_core.operations_v1 -import google.api_core.page_iterator -import google.api_core.path_template -import grpc - -from google.cloud.dataproc_v1beta2.gapic import enums -from google.cloud.dataproc_v1beta2.gapic import workflow_template_service_client_config -from google.cloud.dataproc_v1beta2.gapic.transports import ( - workflow_template_service_grpc_transport, -) -from google.cloud.dataproc_v1beta2.proto import autoscaling_policies_pb2 -from google.cloud.dataproc_v1beta2.proto import autoscaling_policies_pb2_grpc -from google.cloud.dataproc_v1beta2.proto import clusters_pb2 -from google.cloud.dataproc_v1beta2.proto import clusters_pb2_grpc -from google.cloud.dataproc_v1beta2.proto import jobs_pb2 -from google.cloud.dataproc_v1beta2.proto import jobs_pb2_grpc -from google.cloud.dataproc_v1beta2.proto import operations_pb2 as proto_operations_pb2 -from google.cloud.dataproc_v1beta2.proto import workflow_templates_pb2 -from google.cloud.dataproc_v1beta2.proto import workflow_templates_pb2_grpc -from google.longrunning import operations_pb2 as longrunning_operations_pb2 -from google.protobuf import duration_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution( - "google-cloud-dataproc", -).version - - -class WorkflowTemplateServiceClient(object): - """ - The API interface for managing Workflow Templates in the - Dataproc API. - """ - - SERVICE_ADDRESS = "dataproc.googleapis.com:443" - """The default address of the service.""" - - # The name of the interface for this client. This is the key used to - # find the method configuration in the client_config dictionary. - _INTERFACE_NAME = "google.cloud.dataproc.v1beta2.WorkflowTemplateService" - - @classmethod - def from_service_account_file(cls, filename, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - WorkflowTemplateServiceClient: The constructed client. 
- """ - credentials = service_account.Credentials.from_service_account_file(filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @classmethod - def location_path(cls, project, location): - """Return a fully-qualified location string.""" - return google.api_core.path_template.expand( - "projects/{project}/locations/{location}", - project=project, - location=location, - ) - - @classmethod - def region_path(cls, project, region): - """Return a fully-qualified region string.""" - return google.api_core.path_template.expand( - "projects/{project}/regions/{region}", project=project, region=region, - ) - - @classmethod - def workflow_template_path(cls, project, region, workflow_template): - """Return a fully-qualified workflow_template string.""" - return google.api_core.path_template.expand( - "projects/{project}/regions/{region}/workflowTemplates/{workflow_template}", - project=project, - region=region, - workflow_template=workflow_template, - ) - - def __init__( - self, - transport=None, - channel=None, - credentials=None, - client_config=None, - client_info=None, - client_options=None, - ): - """Constructor. - - Args: - transport (Union[~.WorkflowTemplateServiceGrpcTransport, - Callable[[~.Credentials, type], ~.WorkflowTemplateServiceGrpcTransport]): A transport - instance, responsible for actually making the API calls. - The default transport uses the gRPC protocol. - This argument may also be a callable which returns a - transport instance. Callables will be sent the credentials - as the first argument and the default transport class as - the second argument. - channel (grpc.Channel): DEPRECATED. A ``Channel`` instance - through which to make calls. This argument is mutually exclusive - with ``credentials``; providing both will raise an exception. - credentials (google.auth.credentials.Credentials): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is mutually exclusive with providing a - transport instance to ``transport``; doing so will raise - an exception. - client_config (dict): DEPRECATED. A dictionary of call options for - each method. If not specified, the default configuration is used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - client_options (Union[dict, google.api_core.client_options.ClientOptions]): - Client options used to set user options on the client. API Endpoint - should be set through client_options. - """ - # Raise deprecation warnings for things we want to go away. 
- if client_config is not None: - warnings.warn( - "The `client_config` argument is deprecated.", - PendingDeprecationWarning, - stacklevel=2, - ) - else: - client_config = workflow_template_service_client_config.config - - if channel: - warnings.warn( - "The `channel` argument is deprecated; use " "`transport` instead.", - PendingDeprecationWarning, - stacklevel=2, - ) - - api_endpoint = self.SERVICE_ADDRESS - if client_options: - if type(client_options) == dict: - client_options = google.api_core.client_options.from_dict( - client_options - ) - if client_options.api_endpoint: - api_endpoint = client_options.api_endpoint - - # Instantiate the transport. - # The transport is responsible for handling serialization and - # deserialization and actually sending data to the service. - if transport: - if callable(transport): - self.transport = transport( - credentials=credentials, - default_class=workflow_template_service_grpc_transport.WorkflowTemplateServiceGrpcTransport, - address=api_endpoint, - ) - else: - if credentials: - raise ValueError( - "Received both a transport instance and " - "credentials; these are mutually exclusive." - ) - self.transport = transport - else: - self.transport = workflow_template_service_grpc_transport.WorkflowTemplateServiceGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials, - ) - - if client_info is None: - client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION, - ) - else: - client_info.gapic_version = _GAPIC_LIBRARY_VERSION - self._client_info = client_info - - # Parse out the default settings for retry and timeout for each RPC - # from the client configuration. - # (Ordinarily, these are the defaults specified in the `*_config.py` - # file next to this one.) - self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME], - ) - - # Save a dictionary of cached API call functions. - # These are the actual callables which invoke the proper - # transport methods, wrapped with `wrap_method` to add retry, - # timeout, and the like. - self._inner_api_calls = {} - - # Service calls - def instantiate_workflow_template( - self, - name, - version=None, - instance_id=None, - request_id=None, - parameters=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Instantiates a template and begins execution. - - The returned Operation can be used to track execution of workflow by - polling ``operations.get``. The Operation will complete when entire - workflow is finished. - - The running workflow can be aborted via ``operations.cancel``. This will - cause any inflight jobs to be cancelled and workflow-owned clusters to - be deleted. - - The ``Operation.metadata`` will be - `WorkflowMetadata `__. - Also see `Using - WorkflowMetadata `__. - - On successful completion, ``Operation.response`` will be ``Empty``. - - Example: - >>> from google.cloud import dataproc_v1beta2 - >>> - >>> client = dataproc_v1beta2.WorkflowTemplateServiceClient() - >>> - >>> # TODO: Initialize `name`: - >>> name = '' - >>> - >>> response = client.instantiate_workflow_template(name) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - name (str): Required. 
The resource name of the workflow template, as described - in https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates.instantiate``, the resource - name of the template has the following format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For ``projects.locations.workflowTemplates.instantiate``, the - resource name of the template has the following format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - version (int): Optional. The version of workflow template to instantiate. If specified, - the workflow will be instantiated only if the current version of - the workflow template has the supplied version. - - This option cannot be used to instantiate a previous version of - workflow template. - instance_id (str): Deprecated. Please use ``request_id`` field instead. - request_id (str): Optional. A tag that prevents multiple concurrent workflow instances - with the same tag from running. This mitigates risk of concurrent - instances started due to retries. - - It is recommended to always set this value to a - `UUID `__. - - The tag must contain only letters (a-z, A-Z), numbers (0-9), underscores - (_), and hyphens (-). The maximum length is 40 characters. - parameters (dict[str -> str]): Optional. Map from parameter names to values that should be used for those - parameters. Values may not exceed 100 characters. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
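
Editor's note: the docstring above describes the old flattened calling convention and the long-running semantics of template instantiation. A hedged sketch of the equivalent call against the replacement surface follows; the keyword-only `name` argument and the returned operation object with a blocking `result()` are assumptions about the new generated client, and the resource name shown is a placeholder.

    # Hedged sketch, not part of this patch: instantiate a template and wait
    # for the workflow to finish (Operation.response is Empty on success).
    from google.cloud import dataproc_v1beta2  # assumed post-migration import path

    client = dataproc_v1beta2.WorkflowTemplateServiceClient()
    name = (  # placeholder resource name
        "projects/my-project/regions/us-central1/workflowTemplates/my-template"
    )
    operation = client.instantiate_workflow_template(name=name)
    operation.result()  # blocks until the entire workflow has finished
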
- if "instantiate_workflow_template" not in self._inner_api_calls: - self._inner_api_calls[ - "instantiate_workflow_template" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.instantiate_workflow_template, - default_retry=self._method_configs["InstantiateWorkflowTemplate"].retry, - default_timeout=self._method_configs[ - "InstantiateWorkflowTemplate" - ].timeout, - client_info=self._client_info, - ) - - request = workflow_templates_pb2.InstantiateWorkflowTemplateRequest( - name=name, - version=version, - instance_id=instance_id, - request_id=request_id, - parameters=parameters, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["instantiate_workflow_template"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=workflow_templates_pb2.WorkflowMetadata, - ) - - def instantiate_inline_workflow_template( - self, - parent, - template, - instance_id=None, - request_id=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Instantiates a template and begins execution. - - This method is equivalent to executing the sequence - ``CreateWorkflowTemplate``, ``InstantiateWorkflowTemplate``, - ``DeleteWorkflowTemplate``. - - The returned Operation can be used to track execution of workflow by - polling ``operations.get``. The Operation will complete when entire - workflow is finished. - - The running workflow can be aborted via ``operations.cancel``. This will - cause any inflight jobs to be cancelled and workflow-owned clusters to - be deleted. - - The ``Operation.metadata`` will be - `WorkflowMetadata `__. - Also see `Using - WorkflowMetadata `__. - - On successful completion, ``Operation.response`` will be ``Empty``. - - Example: - >>> from google.cloud import dataproc_v1beta2 - >>> - >>> client = dataproc_v1beta2.WorkflowTemplateServiceClient() - >>> - >>> parent = client.region_path('[PROJECT]', '[REGION]') - >>> - >>> # TODO: Initialize `template`: - >>> template = {} - >>> - >>> response = client.instantiate_inline_workflow_template(parent, template) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() - - Args: - parent (str): Required. The resource name of the region or location, as described - in https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates,instantiateinline``, the - resource name of the region has the following format: - ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.workflowTemplates.instantiateinline``, the - resource name of the location has the following format: - ``projects/{project_id}/locations/{location}`` - template (Union[dict, ~google.cloud.dataproc_v1beta2.types.WorkflowTemplate]): Required. The workflow template to instantiate. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1beta2.types.WorkflowTemplate` - instance_id (str): Deprecated. 
Please use ``request_id`` field instead. - request_id (str): Optional. A tag that prevents multiple concurrent workflow instances - with the same tag from running. This mitigates risk of concurrent - instances started due to retries. - - It is recommended to always set this value to a - `UUID `__. - - The tag must contain only letters (a-z, A-Z), numbers (0-9), underscores - (_), and hyphens (-). The maximum length is 40 characters. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "instantiate_inline_workflow_template" not in self._inner_api_calls: - self._inner_api_calls[ - "instantiate_inline_workflow_template" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.instantiate_inline_workflow_template, - default_retry=self._method_configs[ - "InstantiateInlineWorkflowTemplate" - ].retry, - default_timeout=self._method_configs[ - "InstantiateInlineWorkflowTemplate" - ].timeout, - client_info=self._client_info, - ) - - request = workflow_templates_pb2.InstantiateInlineWorkflowTemplateRequest( - parent=parent, - template=template, - instance_id=instance_id, - request_id=request_id, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - operation = self._inner_api_calls["instantiate_inline_workflow_template"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=workflow_templates_pb2.WorkflowMetadata, - ) - - def create_workflow_template( - self, - parent, - template, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Creates new workflow template. - - Example: - >>> from google.cloud import dataproc_v1beta2 - >>> - >>> client = dataproc_v1beta2.WorkflowTemplateServiceClient() - >>> - >>> parent = client.region_path('[PROJECT]', '[REGION]') - >>> - >>> # TODO: Initialize `template`: - >>> template = {} - >>> - >>> response = client.create_workflow_template(parent, template) - - Args: - parent (str): Required. The resource name of the region or location, as described - in https://cloud.google.com/apis/design/resource_names. 
- - - For ``projects.regions.workflowTemplates,create``, the resource name - of the region has the following format: - ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.workflowTemplates.create``, the resource - name of the location has the following format: - ``projects/{project_id}/locations/{location}`` - template (Union[dict, ~google.cloud.dataproc_v1beta2.types.WorkflowTemplate]): Required. The Dataproc workflow template to create. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1beta2.types.WorkflowTemplate` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1beta2.types.WorkflowTemplate` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "create_workflow_template" not in self._inner_api_calls: - self._inner_api_calls[ - "create_workflow_template" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_workflow_template, - default_retry=self._method_configs["CreateWorkflowTemplate"].retry, - default_timeout=self._method_configs["CreateWorkflowTemplate"].timeout, - client_info=self._client_info, - ) - - request = workflow_templates_pb2.CreateWorkflowTemplateRequest( - parent=parent, template=template, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["create_workflow_template"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def get_workflow_template( - self, - name, - version=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Retrieves the latest workflow template. - - Can retrieve previously instantiated template by specifying optional - version parameter. - - Example: - >>> from google.cloud import dataproc_v1beta2 - >>> - >>> client = dataproc_v1beta2.WorkflowTemplateServiceClient() - >>> - >>> # TODO: Initialize `name`: - >>> name = '' - >>> - >>> response = client.get_workflow_template(name) - - Args: - name (str): Required. The resource name of the workflow template, as described - in https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates.get``, the resource name of - the template has the following format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For ``projects.locations.workflowTemplates.get``, the resource name - of the template has the following format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - version (int): Optional. 
The version of workflow template to retrieve. Only previously - instantiated versions can be retrieved. - - If unspecified, retrieves the current version. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1beta2.types.WorkflowTemplate` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "get_workflow_template" not in self._inner_api_calls: - self._inner_api_calls[ - "get_workflow_template" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_workflow_template, - default_retry=self._method_configs["GetWorkflowTemplate"].retry, - default_timeout=self._method_configs["GetWorkflowTemplate"].timeout, - client_info=self._client_info, - ) - - request = workflow_templates_pb2.GetWorkflowTemplateRequest( - name=name, version=version, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["get_workflow_template"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def update_workflow_template( - self, - template, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates (replaces) workflow template. The updated template - must contain version that matches the current server version. - - Example: - >>> from google.cloud import dataproc_v1beta2 - >>> - >>> client = dataproc_v1beta2.WorkflowTemplateServiceClient() - >>> - >>> # TODO: Initialize `template`: - >>> template = {} - >>> - >>> response = client.update_workflow_template(template) - - Args: - template (Union[dict, ~google.cloud.dataproc_v1beta2.types.WorkflowTemplate]): Required. The updated workflow template. - - The ``template.version`` field must match the current version. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_v1beta2.types.WorkflowTemplate` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.dataproc_v1beta2.types.WorkflowTemplate` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. 
- google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "update_workflow_template" not in self._inner_api_calls: - self._inner_api_calls[ - "update_workflow_template" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_workflow_template, - default_retry=self._method_configs["UpdateWorkflowTemplate"].retry, - default_timeout=self._method_configs["UpdateWorkflowTemplate"].timeout, - client_info=self._client_info, - ) - - request = workflow_templates_pb2.UpdateWorkflowTemplateRequest( - template=template, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("template.name", template.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_workflow_template"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - - def list_workflow_templates( - self, - parent, - page_size=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Lists workflows that match the specified filter in the request. - - Example: - >>> from google.cloud import dataproc_v1beta2 - >>> - >>> client = dataproc_v1beta2.WorkflowTemplateServiceClient() - >>> - >>> parent = client.region_path('[PROJECT]', '[REGION]') - >>> - >>> # Iterate over all results - >>> for element in client.list_workflow_templates(parent): - ... # process element - ... pass - >>> - >>> - >>> # Alternatively: - >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_workflow_templates(parent).pages: - ... for element in page: - ... # process element - ... pass - - Args: - parent (str): Required. The resource name of the region or location, as described - in https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates,list``, the resource name of - the region has the following format: - ``projects/{project_id}/regions/{region}`` - - - For ``projects.locations.workflowTemplates.list``, the resource name - of the location has the following format: - ``projects/{project_id}/locations/{location}`` - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.dataproc_v1beta2.types.WorkflowTemplate` instances. - You can also iterate over the pages of the response - using its `pages` property. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. 
- google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "list_workflow_templates" not in self._inner_api_calls: - self._inner_api_calls[ - "list_workflow_templates" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_workflow_templates, - default_retry=self._method_configs["ListWorkflowTemplates"].retry, - default_timeout=self._method_configs["ListWorkflowTemplates"].timeout, - client_info=self._client_info, - ) - - request = workflow_templates_pb2.ListWorkflowTemplatesRequest( - parent=parent, page_size=page_size, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("parent", parent)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_workflow_templates"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="templates", - request_token_field="page_token", - response_token_field="next_page_token", - ) - return iterator - - def delete_workflow_template( - self, - name, - version=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Deletes a workflow template. It does not cancel in-progress workflows. - - Example: - >>> from google.cloud import dataproc_v1beta2 - >>> - >>> client = dataproc_v1beta2.WorkflowTemplateServiceClient() - >>> - >>> # TODO: Initialize `name`: - >>> name = '' - >>> - >>> client.delete_workflow_template(name) - - Args: - name (str): Required. The resource name of the workflow template, as described - in https://cloud.google.com/apis/design/resource_names. - - - For ``projects.regions.workflowTemplates.delete``, the resource name - of the template has the following format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` - - - For ``projects.locations.workflowTemplates.instantiate``, the - resource name of the template has the following format: - ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` - version (int): Optional. The version of workflow template to delete. If specified, - will only delete the template if the current server version matches - specified version. - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
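
Editor's note: for the list and delete methods whose old implementations are removed above, a hedged sketch of the equivalent calls against the replacement surface; the flattened `parent`/`name` keywords and the self-paginating iterator are assumptions about the new generated client, and the project, region, and template IDs are placeholders. As the deleted docstring states, deleting a template does not cancel in-progress workflows.

    # Hedged sketch, not part of this patch: list templates in a region and
    # delete one. The iterator is assumed to fetch further pages on demand,
    # replacing the GRPCIterator wiring removed above.
    from google.cloud import dataproc_v1beta2  # assumed post-migration import path

    client = dataproc_v1beta2.WorkflowTemplateServiceClient()
    parent = "projects/my-project/regions/us-central1"  # placeholder

    for template in client.list_workflow_templates(parent=parent):
        print(template.name)

    client.delete_workflow_template(
        name=f"{parent}/workflowTemplates/my-template"  # placeholder
    )
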
- if "delete_workflow_template" not in self._inner_api_calls: - self._inner_api_calls[ - "delete_workflow_template" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_workflow_template, - default_retry=self._method_configs["DeleteWorkflowTemplate"].retry, - default_timeout=self._method_configs["DeleteWorkflowTemplate"].timeout, - client_info=self._client_info, - ) - - request = workflow_templates_pb2.DeleteWorkflowTemplateRequest( - name=name, version=version, - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("name", name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - self._inner_api_calls["delete_workflow_template"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) diff --git a/google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client_config.py b/google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client_config.py deleted file mode 100644 index b086ceb1..00000000 --- a/google/cloud/dataproc_v1beta2/gapic/workflow_template_service_client_config.py +++ /dev/null @@ -1,81 +0,0 @@ -config = { - "interfaces": { - "google.cloud.dataproc.v1beta2.WorkflowTemplateService": { - "retry_codes": { - "retry_policy_6_codes": ["UNAVAILABLE"], - "no_retry_codes": [], - "retry_policy_7_codes": [ - "DEADLINE_EXCEEDED", - "INTERNAL", - "UNAVAILABLE", - ], - }, - "retry_params": { - "retry_policy_6_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 600000, - "total_timeout_millis": 600000, - }, - "retry_policy_7_params": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 600000, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 600000, - "total_timeout_millis": 600000, - }, - "no_retry_params": { - "initial_retry_delay_millis": 0, - "retry_delay_multiplier": 0.0, - "max_retry_delay_millis": 0, - "initial_rpc_timeout_millis": 0, - "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 0, - "total_timeout_millis": 0, - }, - }, - "methods": { - "InstantiateWorkflowTemplate": { - "timeout_millis": 600000, - "retry_codes_name": "retry_policy_6_codes", - "retry_params_name": "retry_policy_6_params", - }, - "InstantiateInlineWorkflowTemplate": { - "timeout_millis": 600000, - "retry_codes_name": "retry_policy_6_codes", - "retry_params_name": "retry_policy_6_params", - }, - "CreateWorkflowTemplate": { - "timeout_millis": 600000, - "retry_codes_name": "retry_policy_6_codes", - "retry_params_name": "retry_policy_6_params", - }, - "GetWorkflowTemplate": { - "timeout_millis": 600000, - "retry_codes_name": "retry_policy_7_codes", - "retry_params_name": "retry_policy_7_params", - }, - "UpdateWorkflowTemplate": { - "timeout_millis": 600000, - "retry_codes_name": "retry_policy_6_codes", - "retry_params_name": "retry_policy_6_params", - }, - "ListWorkflowTemplates": { - "timeout_millis": 600000, - "retry_codes_name": "retry_policy_7_codes", - "retry_params_name": "retry_policy_7_params", - }, - "DeleteWorkflowTemplate": { - "timeout_millis": 600000, - "retry_codes_name": "retry_policy_6_codes", - "retry_params_name": "retry_policy_6_params", - }, - }, - } - } -} diff --git a/google/cloud/dataproc_v1beta2/proto/__init__.py 
b/google/cloud/dataproc_v1beta2/proto/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/google/cloud/dataproc_v1beta2/proto/autoscaling_policies_pb2.py b/google/cloud/dataproc_v1beta2/proto/autoscaling_policies_pb2.py deleted file mode 100644 index ac8b00ac..00000000 --- a/google/cloud/dataproc_v1beta2/proto/autoscaling_policies_pb2.py +++ /dev/null @@ -1,1211 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/dataproc_v1beta2/proto/autoscaling_policies.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/dataproc_v1beta2/proto/autoscaling_policies.proto", - package="google.cloud.dataproc.v1beta2", - syntax="proto3", - serialized_options=b"\n!com.google.cloud.dataproc.v1beta2B\030AutoscalingPoliciesProtoP\001ZEgoogle.golang.org/genproto/googleapis/cloud/dataproc/v1beta2;dataproc\352AE\n\036dataproc.googleapis.com/Region\022#projects/{project}/regions/{region}", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n>google/cloud/dataproc_v1beta2/proto/autoscaling_policies.proto\x12\x1dgoogle.cloud.dataproc.v1beta2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1bgoogle/protobuf/empty.proto"\xb9\x04\n\x11\x41utoscalingPolicy\x12\x0f\n\x02id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x11\n\x04name\x18\x02 \x01(\tB\x03\xe0\x41\x03\x12S\n\x0f\x62\x61sic_algorithm\x18\x03 \x01(\x0b\x32\x38.google.cloud.dataproc.v1beta2.BasicAutoscalingAlgorithmH\x00\x12_\n\rworker_config\x18\x04 \x01(\x0b\x32\x43.google.cloud.dataproc.v1beta2.InstanceGroupAutoscalingPolicyConfigB\x03\xe0\x41\x02\x12i\n\x17secondary_worker_config\x18\x05 \x01(\x0b\x32\x43.google.cloud.dataproc.v1beta2.InstanceGroupAutoscalingPolicyConfigB\x03\xe0\x41\x01:\xd1\x01\xea\x41\xcd\x01\n)dataproc.googleapis.com/AutoscalingPolicy\x12Pprojects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}\x12Lprojects/{project}/regions/{region}/autoscalingPolicies/{autoscaling_policy} \x01\x42\x0b\n\talgorithm"\xa9\x01\n\x19\x42\x61sicAutoscalingAlgorithm\x12S\n\x0byarn_config\x18\x01 \x01(\x0b\x32\x39.google.cloud.dataproc.v1beta2.BasicYarnAutoscalingConfigB\x03\xe0\x41\x02\x12\x37\n\x0f\x63ooldown_period\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x01"\xf9\x01\n\x1a\x42\x61sicYarnAutoscalingConfig\x12\x45\n\x1dgraceful_decommission_timeout\x18\x05 \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x02\x12\x1c\n\x0fscale_up_factor\x18\x01 \x01(\x01\x42\x03\xe0\x41\x02\x12\x1e\n\x11scale_down_factor\x18\x02 \x01(\x01\x42\x03\xe0\x41\x02\x12)\n\x1cscale_up_min_worker_fraction\x18\x03 
\x01(\x01\x42\x03\xe0\x41\x01\x12+\n\x1escale_down_min_worker_fraction\x18\x04 \x01(\x01\x42\x03\xe0\x41\x01"s\n$InstanceGroupAutoscalingPolicyConfig\x12\x1a\n\rmin_instances\x18\x01 \x01(\x05\x42\x03\xe0\x41\x01\x12\x1a\n\rmax_instances\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x13\n\x06weight\x18\x03 \x01(\x05\x42\x03\xe0\x41\x01"\xaa\x01\n\x1e\x43reateAutoscalingPolicyRequest\x12\x41\n\x06parent\x18\x01 \x01(\tB1\xe0\x41\x02\xfa\x41+\x12)dataproc.googleapis.com/AutoscalingPolicy\x12\x45\n\x06policy\x18\x02 \x01(\x0b\x32\x30.google.cloud.dataproc.v1beta2.AutoscalingPolicyB\x03\xe0\x41\x02"^\n\x1bGetAutoscalingPolicyRequest\x12?\n\x04name\x18\x01 \x01(\tB1\xe0\x41\x02\xfa\x41+\n)dataproc.googleapis.com/AutoscalingPolicy"g\n\x1eUpdateAutoscalingPolicyRequest\x12\x45\n\x06policy\x18\x01 \x01(\x0b\x32\x30.google.cloud.dataproc.v1beta2.AutoscalingPolicyB\x03\xe0\x41\x02"a\n\x1e\x44\x65leteAutoscalingPolicyRequest\x12?\n\x04name\x18\x01 \x01(\tB1\xe0\x41\x02\xfa\x41+\n)dataproc.googleapis.com/AutoscalingPolicy"\x94\x01\n\x1eListAutoscalingPoliciesRequest\x12\x41\n\x06parent\x18\x01 \x01(\tB1\xe0\x41\x02\xfa\x41+\x12)dataproc.googleapis.com/AutoscalingPolicy\x12\x16\n\tpage_size\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x17\n\npage_token\x18\x03 \x01(\tB\x03\xe0\x41\x01"\x88\x01\n\x1fListAutoscalingPoliciesResponse\x12G\n\x08policies\x18\x01 \x03(\x0b\x32\x30.google.cloud.dataproc.v1beta2.AutoscalingPolicyB\x03\xe0\x41\x03\x12\x1c\n\x0fnext_page_token\x18\x02 \x01(\tB\x03\xe0\x41\x03\x32\x8f\x0c\n\x18\x41utoscalingPolicyService\x12\xb0\x02\n\x17\x43reateAutoscalingPolicy\x12=.google.cloud.dataproc.v1beta2.CreateAutoscalingPolicyRequest\x1a\x30.google.cloud.dataproc.v1beta2.AutoscalingPolicy"\xa3\x01\x82\xd3\xe4\x93\x02\x8c\x01".google.cloud.dataproc.v1beta2.ListAutoscalingPoliciesResponse"\x8b\x01\x82\xd3\xe4\x93\x02|\x12.google.cloud.dataproc.v1beta2.ClusterMetrics.HdfsMetricsEntry\x12T\n\x0cyarn_metrics\x18\x02 \x03(\x0b\x32>.google.cloud.dataproc.v1beta2.ClusterMetrics.YarnMetricsEntry\x1a\x32\n\x10HdfsMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01\x1a\x32\n\x10YarnMetricsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01"\x9b\x01\n\x14\x43reateClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12<\n\x07\x63luster\x18\x02 \x01(\x0b\x32&.google.cloud.dataproc.v1beta2.ClusterB\x03\xe0\x41\x02\x12\x17\n\nrequest_id\x18\x04 \x01(\tB\x03\xe0\x41\x01"\xb3\x02\n\x14UpdateClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x05 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12<\n\x07\x63luster\x18\x03 \x01(\x0b\x32&.google.cloud.dataproc.v1beta2.ClusterB\x03\xe0\x41\x02\x12\x45\n\x1dgraceful_decommission_timeout\x18\x06 \x01(\x0b\x32\x19.google.protobuf.DurationB\x03\xe0\x41\x01\x12\x34\n\x0bupdate_mask\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02\x12\x17\n\nrequest_id\x18\x07 \x01(\tB\x03\xe0\x41\x01"\x93\x01\n\x14\x44\x65leteClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_uuid\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x17\n\nrequest_id\x18\x05 \x01(\tB\x03\xe0\x41\x01"\\\n\x11GetClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 
\x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02"\x89\x01\n\x13ListClustersRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x04 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06\x66ilter\x18\x05 \x01(\tB\x03\xe0\x41\x01\x12\x16\n\tpage_size\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x17\n\npage_token\x18\x03 \x01(\tB\x03\xe0\x41\x01"s\n\x14ListClustersResponse\x12=\n\x08\x63lusters\x18\x01 \x03(\x0b\x32&.google.cloud.dataproc.v1beta2.ClusterB\x03\xe0\x41\x03\x12\x1c\n\x0fnext_page_token\x18\x02 \x01(\tB\x03\xe0\x41\x03"a\n\x16\x44iagnoseClusterRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_name\x18\x02 \x01(\tB\x03\xe0\x41\x02"1\n\x16\x44iagnoseClusterResults\x12\x17\n\noutput_uri\x18\x01 \x01(\tB\x03\xe0\x41\x03"\xfd\x01\n\x13ReservationAffinity\x12^\n\x18\x63onsume_reservation_type\x18\x01 \x01(\x0e\x32\x37.google.cloud.dataproc.v1beta2.ReservationAffinity.TypeB\x03\xe0\x41\x01\x12\x10\n\x03key\x18\x02 \x01(\tB\x03\xe0\x41\x01\x12\x13\n\x06values\x18\x03 \x03(\tB\x03\xe0\x41\x01"_\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x12\n\x0eNO_RESERVATION\x10\x01\x12\x13\n\x0f\x41NY_RESERVATION\x10\x02\x12\x18\n\x14SPECIFIC_RESERVATION\x10\x03\x32\xe7\r\n\x11\x43lusterController\x12\x91\x02\n\rCreateCluster\x12\x33.google.cloud.dataproc.v1beta2.CreateClusterRequest\x1a\x1d.google.longrunning.Operation"\xab\x01\x82\xd3\xe4\x93\x02\x43"8/v1beta2/projects/{project_id}/regions/{region}/clusters:\x07\x63luster\xda\x41\x1bproject_id, region, cluster\xca\x41\x41\n\x07\x43luster\x12\x36google.cloud.dataproc.v1beta2.ClusterOperationMetadata\x12\xbb\x02\n\rUpdateCluster\x12\x33.google.cloud.dataproc.v1beta2.UpdateClusterRequest\x1a\x1d.google.longrunning.Operation"\xd5\x01\x82\xd3\xe4\x93\x02R2G/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}:\x07\x63luster\xda\x41\x36project_id, region, cluster_name, cluster, update_mask\xca\x41\x41\n\x07\x43luster\x12\x36google.cloud.dataproc.v1beta2.ClusterOperationMetadata\x12\xaa\x02\n\rDeleteCluster\x12\x33.google.cloud.dataproc.v1beta2.DeleteClusterRequest\x1a\x1d.google.longrunning.Operation"\xc4\x01\x82\xd3\xe4\x93\x02I*G/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}\xda\x41 project_id, region, cluster_name\xca\x41O\n\x15google.protobuf.Empty\x12\x36google.cloud.dataproc.v1beta2.ClusterOperationMetadata\x12\xda\x01\n\nGetCluster\x12\x30.google.cloud.dataproc.v1beta2.GetClusterRequest\x1a&.google.cloud.dataproc.v1beta2.Cluster"r\x82\xd3\xe4\x93\x02I\x12G/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}\xda\x41 project_id, region, cluster_name\x12\xeb\x01\n\x0cListClusters\x12\x32.google.cloud.dataproc.v1beta2.ListClustersRequest\x1a\x33.google.cloud.dataproc.v1beta2.ListClustersResponse"r\x82\xd3\xe4\x93\x02:\x12\x38/v1beta2/projects/{project_id}/regions/{region}/clusters\xda\x41\x12project_id, region\xda\x41\x1aproject_id, region, filter\x12\xba\x02\n\x0f\x44iagnoseCluster\x12\x35.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest\x1a\x1d.google.longrunning.Operation"\xd0\x01\x82\xd3\xe4\x93\x02U"P/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}:diagnose:\x01*\xda\x41 project_id, region, 
cluster_name\xca\x41O\n\x15google.protobuf.Empty\x12\x36google.cloud.dataproc.v1beta2.ClusterOperationMetadata\x1aK\xca\x41\x17\x64\x61taproc.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB{\n!com.google.cloud.dataproc.v1beta2B\rClustersProtoP\x01ZEgoogle.golang.org/genproto/googleapis/cloud/dataproc/v1beta2;dataprocb\x06proto3', - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_shared__pb2.DESCRIPTOR, - google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, - google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, - google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - ], -) - - -_CLUSTERSTATUS_STATE = _descriptor.EnumDescriptor( - name="State", - full_name="google.cloud.dataproc.v1beta2.ClusterStatus.State", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="UNKNOWN", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="CREATING", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="RUNNING", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="ERROR", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="DELETING", - index=4, - number=4, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="UPDATING", - index=5, - number=5, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="STOPPING", - index=6, - number=6, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="STOPPED", - index=7, - number=7, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="STARTING", - index=8, - number=8, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=4857, - serialized_end=4984, -) -_sym_db.RegisterEnumDescriptor(_CLUSTERSTATUS_STATE) - -_CLUSTERSTATUS_SUBSTATE = _descriptor.EnumDescriptor( - name="Substate", - full_name="google.cloud.dataproc.v1beta2.ClusterStatus.Substate", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="UNHEALTHY", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="STALE_STATUS", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - 
containing_type=None, - serialized_options=None, - serialized_start=4986, - serialized_end=5046, -) -_sym_db.RegisterEnumDescriptor(_CLUSTERSTATUS_SUBSTATE) - -_RESERVATIONAFFINITY_TYPE = _descriptor.EnumDescriptor( - name="Type", - full_name="google.cloud.dataproc.v1beta2.ReservationAffinity.Type", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="TYPE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="NO_RESERVATION", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="ANY_RESERVATION", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="SPECIFIC_RESERVATION", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=6878, - serialized_end=6973, -) -_sym_db.RegisterEnumDescriptor(_RESERVATIONAFFINITY_TYPE) - - -_CLUSTER_LABELSENTRY = _descriptor.Descriptor( - name="LabelsEntry", - full_name="google.cloud.dataproc.v1beta2.Cluster.LabelsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1beta2.Cluster.LabelsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1beta2.Cluster.LabelsEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=828, - serialized_end=873, -) - -_CLUSTER = _descriptor.Descriptor( - name="Cluster", - full_name="google.cloud.dataproc.v1beta2.Cluster", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="project_id", - full_name="google.cloud.dataproc.v1beta2.Cluster.project_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_name", - full_name="google.cloud.dataproc.v1beta2.Cluster.cluster_name", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), 
- message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="config", - full_name="google.cloud.dataproc.v1beta2.Cluster.config", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="labels", - full_name="google.cloud.dataproc.v1beta2.Cluster.labels", - index=3, - number=8, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="status", - full_name="google.cloud.dataproc.v1beta2.Cluster.status", - index=4, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="status_history", - full_name="google.cloud.dataproc.v1beta2.Cluster.status_history", - index=5, - number=7, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_uuid", - full_name="google.cloud.dataproc.v1beta2.Cluster.cluster_uuid", - index=6, - number=6, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="metrics", - full_name="google.cloud.dataproc.v1beta2.Cluster.metrics", - index=7, - number=9, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_CLUSTER_LABELSENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=387, - serialized_end=873, -) - - -_CLUSTERCONFIG = _descriptor.Descriptor( - name="ClusterConfig", - full_name="google.cloud.dataproc.v1beta2.ClusterConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="config_bucket", - full_name="google.cloud.dataproc.v1beta2.ClusterConfig.config_bucket", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - 
default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="gce_cluster_config", - full_name="google.cloud.dataproc.v1beta2.ClusterConfig.gce_cluster_config", - index=1, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="master_config", - full_name="google.cloud.dataproc.v1beta2.ClusterConfig.master_config", - index=2, - number=9, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="worker_config", - full_name="google.cloud.dataproc.v1beta2.ClusterConfig.worker_config", - index=3, - number=10, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="secondary_worker_config", - full_name="google.cloud.dataproc.v1beta2.ClusterConfig.secondary_worker_config", - index=4, - number=12, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="software_config", - full_name="google.cloud.dataproc.v1beta2.ClusterConfig.software_config", - index=5, - number=13, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="lifecycle_config", - full_name="google.cloud.dataproc.v1beta2.ClusterConfig.lifecycle_config", - index=6, - number=14, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="initialization_actions", - full_name="google.cloud.dataproc.v1beta2.ClusterConfig.initialization_actions", - index=7, - number=11, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="encryption_config", - 
full_name="google.cloud.dataproc.v1beta2.ClusterConfig.encryption_config", - index=8, - number=15, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="autoscaling_config", - full_name="google.cloud.dataproc.v1beta2.ClusterConfig.autoscaling_config", - index=9, - number=16, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="endpoint_config", - full_name="google.cloud.dataproc.v1beta2.ClusterConfig.endpoint_config", - index=10, - number=17, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="security_config", - full_name="google.cloud.dataproc.v1beta2.ClusterConfig.security_config", - index=11, - number=18, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="gke_cluster_config", - full_name="google.cloud.dataproc.v1beta2.ClusterConfig.gke_cluster_config", - index=12, - number=19, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=876, - serialized_end=1901, -) - - -_GKECLUSTERCONFIG_NAMESPACEDGKEDEPLOYMENTTARGET = _descriptor.Descriptor( - name="NamespacedGkeDeploymentTarget", - full_name="google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="target_gke_cluster", - full_name="google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget.target_gke_cluster", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b'\340A\001\372A"\n container.googleapis.com/Cluster', - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_namespace", - full_name="google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget.cluster_namespace", - index=1, - number=2, - type=9, - cpp_type=9, - 
label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2051, - serialized_end=2184, -) - -_GKECLUSTERCONFIG = _descriptor.Descriptor( - name="GkeClusterConfig", - full_name="google.cloud.dataproc.v1beta2.GkeClusterConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="namespaced_gke_deployment_target", - full_name="google.cloud.dataproc.v1beta2.GkeClusterConfig.namespaced_gke_deployment_target", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_GKECLUSTERCONFIG_NAMESPACEDGKEDEPLOYMENTTARGET,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1904, - serialized_end=2184, -) - - -_ENDPOINTCONFIG_HTTPPORTSENTRY = _descriptor.Descriptor( - name="HttpPortsEntry", - full_name="google.cloud.dataproc.v1beta2.EndpointConfig.HttpPortsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1beta2.EndpointConfig.HttpPortsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1beta2.EndpointConfig.HttpPortsEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2330, - serialized_end=2378, -) - -_ENDPOINTCONFIG = _descriptor.Descriptor( - name="EndpointConfig", - full_name="google.cloud.dataproc.v1beta2.EndpointConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="http_ports", - full_name="google.cloud.dataproc.v1beta2.EndpointConfig.http_ports", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - 
file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="enable_http_port_access", - full_name="google.cloud.dataproc.v1beta2.EndpointConfig.enable_http_port_access", - index=1, - number=2, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_ENDPOINTCONFIG_HTTPPORTSENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2187, - serialized_end=2378, -) - - -_AUTOSCALINGCONFIG = _descriptor.Descriptor( - name="AutoscalingConfig", - full_name="google.cloud.dataproc.v1beta2.AutoscalingConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="policy_uri", - full_name="google.cloud.dataproc.v1beta2.AutoscalingConfig.policy_uri", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2380, - serialized_end=2424, -) - - -_ENCRYPTIONCONFIG = _descriptor.Descriptor( - name="EncryptionConfig", - full_name="google.cloud.dataproc.v1beta2.EncryptionConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="gce_pd_kms_key_name", - full_name="google.cloud.dataproc.v1beta2.EncryptionConfig.gce_pd_kms_key_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2426, - serialized_end=2478, -) - - -_GCECLUSTERCONFIG_METADATAENTRY = _descriptor.Descriptor( - name="MetadataEntry", - full_name="google.cloud.dataproc.v1beta2.GceClusterConfig.MetadataEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1beta2.GceClusterConfig.MetadataEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1beta2.GceClusterConfig.MetadataEntry.value", - index=1, - 
number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2859, - serialized_end=2906, -) - -_GCECLUSTERCONFIG = _descriptor.Descriptor( - name="GceClusterConfig", - full_name="google.cloud.dataproc.v1beta2.GceClusterConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="zone_uri", - full_name="google.cloud.dataproc.v1beta2.GceClusterConfig.zone_uri", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="network_uri", - full_name="google.cloud.dataproc.v1beta2.GceClusterConfig.network_uri", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="subnetwork_uri", - full_name="google.cloud.dataproc.v1beta2.GceClusterConfig.subnetwork_uri", - index=2, - number=6, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="internal_ip_only", - full_name="google.cloud.dataproc.v1beta2.GceClusterConfig.internal_ip_only", - index=3, - number=7, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="service_account", - full_name="google.cloud.dataproc.v1beta2.GceClusterConfig.service_account", - index=4, - number=8, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="service_account_scopes", - full_name="google.cloud.dataproc.v1beta2.GceClusterConfig.service_account_scopes", - index=5, - number=3, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - 
create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="tags", - full_name="google.cloud.dataproc.v1beta2.GceClusterConfig.tags", - index=6, - number=4, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="metadata", - full_name="google.cloud.dataproc.v1beta2.GceClusterConfig.metadata", - index=7, - number=5, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="reservation_affinity", - full_name="google.cloud.dataproc.v1beta2.GceClusterConfig.reservation_affinity", - index=8, - number=11, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_GCECLUSTERCONFIG_METADATAENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2481, - serialized_end=2906, -) - - -_INSTANCEGROUPCONFIG = _descriptor.Descriptor( - name="InstanceGroupConfig", - full_name="google.cloud.dataproc.v1beta2.InstanceGroupConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="num_instances", - full_name="google.cloud.dataproc.v1beta2.InstanceGroupConfig.num_instances", - index=0, - number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="instance_names", - full_name="google.cloud.dataproc.v1beta2.InstanceGroupConfig.instance_names", - index=1, - number=2, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="image_uri", - full_name="google.cloud.dataproc.v1beta2.InstanceGroupConfig.image_uri", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="machine_type_uri", - full_name="google.cloud.dataproc.v1beta2.InstanceGroupConfig.machine_type_uri", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - 
enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="disk_config", - full_name="google.cloud.dataproc.v1beta2.InstanceGroupConfig.disk_config", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="is_preemptible", - full_name="google.cloud.dataproc.v1beta2.InstanceGroupConfig.is_preemptible", - index=5, - number=6, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="managed_group_config", - full_name="google.cloud.dataproc.v1beta2.InstanceGroupConfig.managed_group_config", - index=6, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="accelerators", - full_name="google.cloud.dataproc.v1beta2.InstanceGroupConfig.accelerators", - index=7, - number=8, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="min_cpu_platform", - full_name="google.cloud.dataproc.v1beta2.InstanceGroupConfig.min_cpu_platform", - index=8, - number=9, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2909, - serialized_end=3329, -) - - -_MANAGEDGROUPCONFIG = _descriptor.Descriptor( - name="ManagedGroupConfig", - full_name="google.cloud.dataproc.v1beta2.ManagedGroupConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="instance_template_name", - full_name="google.cloud.dataproc.v1beta2.ManagedGroupConfig.instance_template_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="instance_group_manager_name", - 
full_name="google.cloud.dataproc.v1beta2.ManagedGroupConfig.instance_group_manager_name", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3331, - serialized_end=3430, -) - - -_ACCELERATORCONFIG = _descriptor.Descriptor( - name="AcceleratorConfig", - full_name="google.cloud.dataproc.v1beta2.AcceleratorConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="accelerator_type_uri", - full_name="google.cloud.dataproc.v1beta2.AcceleratorConfig.accelerator_type_uri", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="accelerator_count", - full_name="google.cloud.dataproc.v1beta2.AcceleratorConfig.accelerator_count", - index=1, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3432, - serialized_end=3508, -) - - -_DISKCONFIG = _descriptor.Descriptor( - name="DiskConfig", - full_name="google.cloud.dataproc.v1beta2.DiskConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="boot_disk_type", - full_name="google.cloud.dataproc.v1beta2.DiskConfig.boot_disk_type", - index=0, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="boot_disk_size_gb", - full_name="google.cloud.dataproc.v1beta2.DiskConfig.boot_disk_size_gb", - index=1, - number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="num_local_ssds", - full_name="google.cloud.dataproc.v1beta2.DiskConfig.num_local_ssds", - index=2, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3510, - serialized_end=3607, -) - - -_LIFECYCLECONFIG = _descriptor.Descriptor( - name="LifecycleConfig", - full_name="google.cloud.dataproc.v1beta2.LifecycleConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="idle_delete_ttl", - full_name="google.cloud.dataproc.v1beta2.LifecycleConfig.idle_delete_ttl", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="auto_delete_time", - full_name="google.cloud.dataproc.v1beta2.LifecycleConfig.auto_delete_time", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="auto_delete_ttl", - full_name="google.cloud.dataproc.v1beta2.LifecycleConfig.auto_delete_ttl", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="idle_start_time", - full_name="google.cloud.dataproc.v1beta2.LifecycleConfig.idle_start_time", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="ttl", - full_name="google.cloud.dataproc.v1beta2.LifecycleConfig.ttl", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=3610, - serialized_end=3869, -) - - -_SECURITYCONFIG = _descriptor.Descriptor( - name="SecurityConfig", - full_name="google.cloud.dataproc.v1beta2.SecurityConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="kerberos_config", - full_name="google.cloud.dataproc.v1beta2.SecurityConfig.kerberos_config", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - 
nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3871, - serialized_end=3959, -) - - -_KERBEROSCONFIG = _descriptor.Descriptor( - name="KerberosConfig", - full_name="google.cloud.dataproc.v1beta2.KerberosConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="enable_kerberos", - full_name="google.cloud.dataproc.v1beta2.KerberosConfig.enable_kerberos", - index=0, - number=1, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="root_principal_password_uri", - full_name="google.cloud.dataproc.v1beta2.KerberosConfig.root_principal_password_uri", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="kms_key_uri", - full_name="google.cloud.dataproc.v1beta2.KerberosConfig.kms_key_uri", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="keystore_uri", - full_name="google.cloud.dataproc.v1beta2.KerberosConfig.keystore_uri", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="truststore_uri", - full_name="google.cloud.dataproc.v1beta2.KerberosConfig.truststore_uri", - index=4, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="keystore_password_uri", - full_name="google.cloud.dataproc.v1beta2.KerberosConfig.keystore_password_uri", - index=5, - number=6, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="key_password_uri", - full_name="google.cloud.dataproc.v1beta2.KerberosConfig.key_password_uri", - index=6, - number=7, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - 
enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="truststore_password_uri", - full_name="google.cloud.dataproc.v1beta2.KerberosConfig.truststore_password_uri", - index=7, - number=8, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cross_realm_trust_realm", - full_name="google.cloud.dataproc.v1beta2.KerberosConfig.cross_realm_trust_realm", - index=8, - number=9, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cross_realm_trust_kdc", - full_name="google.cloud.dataproc.v1beta2.KerberosConfig.cross_realm_trust_kdc", - index=9, - number=10, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cross_realm_trust_admin_server", - full_name="google.cloud.dataproc.v1beta2.KerberosConfig.cross_realm_trust_admin_server", - index=10, - number=11, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cross_realm_trust_shared_password_uri", - full_name="google.cloud.dataproc.v1beta2.KerberosConfig.cross_realm_trust_shared_password_uri", - index=11, - number=12, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="kdc_db_key_uri", - full_name="google.cloud.dataproc.v1beta2.KerberosConfig.kdc_db_key_uri", - index=12, - number=13, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="tgt_lifetime_hours", - full_name="google.cloud.dataproc.v1beta2.KerberosConfig.tgt_lifetime_hours", - index=13, - number=14, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - 
create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="realm", - full_name="google.cloud.dataproc.v1beta2.KerberosConfig.realm", - index=14, - number=15, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3962, - serialized_end=4490, -) - - -_NODEINITIALIZATIONACTION = _descriptor.Descriptor( - name="NodeInitializationAction", - full_name="google.cloud.dataproc.v1beta2.NodeInitializationAction", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="executable_file", - full_name="google.cloud.dataproc.v1beta2.NodeInitializationAction.executable_file", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="execution_timeout", - full_name="google.cloud.dataproc.v1beta2.NodeInitializationAction.execution_timeout", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4492, - serialized_end=4607, -) - - -_CLUSTERSTATUS = _descriptor.Descriptor( - name="ClusterStatus", - full_name="google.cloud.dataproc.v1beta2.ClusterStatus", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="state", - full_name="google.cloud.dataproc.v1beta2.ClusterStatus.state", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="detail", - full_name="google.cloud.dataproc.v1beta2.ClusterStatus.detail", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="state_start_time", - full_name="google.cloud.dataproc.v1beta2.ClusterStatus.state_start_time", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, 
- enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="substate", - full_name="google.cloud.dataproc.v1beta2.ClusterStatus.substate", - index=3, - number=4, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_CLUSTERSTATUS_STATE, _CLUSTERSTATUS_SUBSTATE,], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4610, - serialized_end=5046, -) - - -_SOFTWARECONFIG_PROPERTIESENTRY = _descriptor.Descriptor( - name="PropertiesEntry", - full_name="google.cloud.dataproc.v1beta2.SoftwareConfig.PropertiesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1beta2.SoftwareConfig.PropertiesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1beta2.SoftwareConfig.PropertiesEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=5254, - serialized_end=5303, -) - -_SOFTWARECONFIG = _descriptor.Descriptor( - name="SoftwareConfig", - full_name="google.cloud.dataproc.v1beta2.SoftwareConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="image_version", - full_name="google.cloud.dataproc.v1beta2.SoftwareConfig.image_version", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="properties", - full_name="google.cloud.dataproc.v1beta2.SoftwareConfig.properties", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="optional_components", - 
full_name="google.cloud.dataproc.v1beta2.SoftwareConfig.optional_components", - index=2, - number=3, - type=14, - cpp_type=8, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_SOFTWARECONFIG_PROPERTIESENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=5049, - serialized_end=5303, -) - - -_CLUSTERMETRICS_HDFSMETRICSENTRY = _descriptor.Descriptor( - name="HdfsMetricsEntry", - full_name="google.cloud.dataproc.v1beta2.ClusterMetrics.HdfsMetricsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1beta2.ClusterMetrics.HdfsMetricsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1beta2.ClusterMetrics.HdfsMetricsEntry.value", - index=1, - number=2, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=5496, - serialized_end=5546, -) - -_CLUSTERMETRICS_YARNMETRICSENTRY = _descriptor.Descriptor( - name="YarnMetricsEntry", - full_name="google.cloud.dataproc.v1beta2.ClusterMetrics.YarnMetricsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1beta2.ClusterMetrics.YarnMetricsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1beta2.ClusterMetrics.YarnMetricsEntry.value", - index=1, - number=2, - type=3, - cpp_type=2, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=5548, - serialized_end=5598, -) - -_CLUSTERMETRICS = _descriptor.Descriptor( - name="ClusterMetrics", - 
full_name="google.cloud.dataproc.v1beta2.ClusterMetrics", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="hdfs_metrics", - full_name="google.cloud.dataproc.v1beta2.ClusterMetrics.hdfs_metrics", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="yarn_metrics", - full_name="google.cloud.dataproc.v1beta2.ClusterMetrics.yarn_metrics", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_CLUSTERMETRICS_HDFSMETRICSENTRY, _CLUSTERMETRICS_YARNMETRICSENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=5306, - serialized_end=5598, -) - - -_CREATECLUSTERREQUEST = _descriptor.Descriptor( - name="CreateClusterRequest", - full_name="google.cloud.dataproc.v1beta2.CreateClusterRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="project_id", - full_name="google.cloud.dataproc.v1beta2.CreateClusterRequest.project_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="region", - full_name="google.cloud.dataproc.v1beta2.CreateClusterRequest.region", - index=1, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster", - full_name="google.cloud.dataproc.v1beta2.CreateClusterRequest.cluster", - index=2, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="request_id", - full_name="google.cloud.dataproc.v1beta2.CreateClusterRequest.request_id", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - 
syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=5601, - serialized_end=5756, -) - - -_UPDATECLUSTERREQUEST = _descriptor.Descriptor( - name="UpdateClusterRequest", - full_name="google.cloud.dataproc.v1beta2.UpdateClusterRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="project_id", - full_name="google.cloud.dataproc.v1beta2.UpdateClusterRequest.project_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="region", - full_name="google.cloud.dataproc.v1beta2.UpdateClusterRequest.region", - index=1, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_name", - full_name="google.cloud.dataproc.v1beta2.UpdateClusterRequest.cluster_name", - index=2, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster", - full_name="google.cloud.dataproc.v1beta2.UpdateClusterRequest.cluster", - index=3, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="graceful_decommission_timeout", - full_name="google.cloud.dataproc.v1beta2.UpdateClusterRequest.graceful_decommission_timeout", - index=4, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="update_mask", - full_name="google.cloud.dataproc.v1beta2.UpdateClusterRequest.update_mask", - index=5, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="request_id", - full_name="google.cloud.dataproc.v1beta2.UpdateClusterRequest.request_id", - index=6, - number=7, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", 
- file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=5759, - serialized_end=6066, -) - - -_DELETECLUSTERREQUEST = _descriptor.Descriptor( - name="DeleteClusterRequest", - full_name="google.cloud.dataproc.v1beta2.DeleteClusterRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="project_id", - full_name="google.cloud.dataproc.v1beta2.DeleteClusterRequest.project_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="region", - full_name="google.cloud.dataproc.v1beta2.DeleteClusterRequest.region", - index=1, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_name", - full_name="google.cloud.dataproc.v1beta2.DeleteClusterRequest.cluster_name", - index=2, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_uuid", - full_name="google.cloud.dataproc.v1beta2.DeleteClusterRequest.cluster_uuid", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="request_id", - full_name="google.cloud.dataproc.v1beta2.DeleteClusterRequest.request_id", - index=4, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=6069, - serialized_end=6216, -) - - -_GETCLUSTERREQUEST = _descriptor.Descriptor( - name="GetClusterRequest", - full_name="google.cloud.dataproc.v1beta2.GetClusterRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="project_id", - full_name="google.cloud.dataproc.v1beta2.GetClusterRequest.project_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - 
has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="region", - full_name="google.cloud.dataproc.v1beta2.GetClusterRequest.region", - index=1, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_name", - full_name="google.cloud.dataproc.v1beta2.GetClusterRequest.cluster_name", - index=2, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=6218, - serialized_end=6310, -) - - -_LISTCLUSTERSREQUEST = _descriptor.Descriptor( - name="ListClustersRequest", - full_name="google.cloud.dataproc.v1beta2.ListClustersRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="project_id", - full_name="google.cloud.dataproc.v1beta2.ListClustersRequest.project_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="region", - full_name="google.cloud.dataproc.v1beta2.ListClustersRequest.region", - index=1, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.cloud.dataproc.v1beta2.ListClustersRequest.filter", - index=2, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.cloud.dataproc.v1beta2.ListClustersRequest.page_size", - index=3, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", 
- full_name="google.cloud.dataproc.v1beta2.ListClustersRequest.page_token", - index=4, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=6313, - serialized_end=6450, -) - - -_LISTCLUSTERSRESPONSE = _descriptor.Descriptor( - name="ListClustersResponse", - full_name="google.cloud.dataproc.v1beta2.ListClustersResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="clusters", - full_name="google.cloud.dataproc.v1beta2.ListClustersResponse.clusters", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.cloud.dataproc.v1beta2.ListClustersResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=6452, - serialized_end=6567, -) - - -_DIAGNOSECLUSTERREQUEST = _descriptor.Descriptor( - name="DiagnoseClusterRequest", - full_name="google.cloud.dataproc.v1beta2.DiagnoseClusterRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="project_id", - full_name="google.cloud.dataproc.v1beta2.DiagnoseClusterRequest.project_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="region", - full_name="google.cloud.dataproc.v1beta2.DiagnoseClusterRequest.region", - index=1, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_name", - full_name="google.cloud.dataproc.v1beta2.DiagnoseClusterRequest.cluster_name", - index=2, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, 
- is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=6569, - serialized_end=6666, -) - - -_DIAGNOSECLUSTERRESULTS = _descriptor.Descriptor( - name="DiagnoseClusterResults", - full_name="google.cloud.dataproc.v1beta2.DiagnoseClusterResults", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="output_uri", - full_name="google.cloud.dataproc.v1beta2.DiagnoseClusterResults.output_uri", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=6668, - serialized_end=6717, -) - - -_RESERVATIONAFFINITY = _descriptor.Descriptor( - name="ReservationAffinity", - full_name="google.cloud.dataproc.v1beta2.ReservationAffinity", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="consume_reservation_type", - full_name="google.cloud.dataproc.v1beta2.ReservationAffinity.consume_reservation_type", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1beta2.ReservationAffinity.key", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="values", - full_name="google.cloud.dataproc.v1beta2.ReservationAffinity.values", - index=2, - number=3, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_RESERVATIONAFFINITY_TYPE,], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=6720, - serialized_end=6973, -) - -_CLUSTER_LABELSENTRY.containing_type = _CLUSTER -_CLUSTER.fields_by_name["config"].message_type = _CLUSTERCONFIG -_CLUSTER.fields_by_name["labels"].message_type = _CLUSTER_LABELSENTRY -_CLUSTER.fields_by_name["status"].message_type = _CLUSTERSTATUS -_CLUSTER.fields_by_name["status_history"].message_type = _CLUSTERSTATUS 
-_CLUSTER.fields_by_name["metrics"].message_type = _CLUSTERMETRICS -_CLUSTERCONFIG.fields_by_name["gce_cluster_config"].message_type = _GCECLUSTERCONFIG -_CLUSTERCONFIG.fields_by_name["master_config"].message_type = _INSTANCEGROUPCONFIG -_CLUSTERCONFIG.fields_by_name["worker_config"].message_type = _INSTANCEGROUPCONFIG -_CLUSTERCONFIG.fields_by_name[ - "secondary_worker_config" -].message_type = _INSTANCEGROUPCONFIG -_CLUSTERCONFIG.fields_by_name["software_config"].message_type = _SOFTWARECONFIG -_CLUSTERCONFIG.fields_by_name["lifecycle_config"].message_type = _LIFECYCLECONFIG -_CLUSTERCONFIG.fields_by_name[ - "initialization_actions" -].message_type = _NODEINITIALIZATIONACTION -_CLUSTERCONFIG.fields_by_name["encryption_config"].message_type = _ENCRYPTIONCONFIG -_CLUSTERCONFIG.fields_by_name["autoscaling_config"].message_type = _AUTOSCALINGCONFIG -_CLUSTERCONFIG.fields_by_name["endpoint_config"].message_type = _ENDPOINTCONFIG -_CLUSTERCONFIG.fields_by_name["security_config"].message_type = _SECURITYCONFIG -_CLUSTERCONFIG.fields_by_name["gke_cluster_config"].message_type = _GKECLUSTERCONFIG -_GKECLUSTERCONFIG_NAMESPACEDGKEDEPLOYMENTTARGET.containing_type = _GKECLUSTERCONFIG -_GKECLUSTERCONFIG.fields_by_name[ - "namespaced_gke_deployment_target" -].message_type = _GKECLUSTERCONFIG_NAMESPACEDGKEDEPLOYMENTTARGET -_ENDPOINTCONFIG_HTTPPORTSENTRY.containing_type = _ENDPOINTCONFIG -_ENDPOINTCONFIG.fields_by_name[ - "http_ports" -].message_type = _ENDPOINTCONFIG_HTTPPORTSENTRY -_GCECLUSTERCONFIG_METADATAENTRY.containing_type = _GCECLUSTERCONFIG -_GCECLUSTERCONFIG.fields_by_name[ - "metadata" -].message_type = _GCECLUSTERCONFIG_METADATAENTRY -_GCECLUSTERCONFIG.fields_by_name[ - "reservation_affinity" -].message_type = _RESERVATIONAFFINITY -_INSTANCEGROUPCONFIG.fields_by_name["disk_config"].message_type = _DISKCONFIG -_INSTANCEGROUPCONFIG.fields_by_name[ - "managed_group_config" -].message_type = _MANAGEDGROUPCONFIG -_INSTANCEGROUPCONFIG.fields_by_name["accelerators"].message_type = _ACCELERATORCONFIG -_LIFECYCLECONFIG.fields_by_name[ - "idle_delete_ttl" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_LIFECYCLECONFIG.fields_by_name[ - "auto_delete_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_LIFECYCLECONFIG.fields_by_name[ - "auto_delete_ttl" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_LIFECYCLECONFIG.fields_by_name[ - "idle_start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_LIFECYCLECONFIG.oneofs_by_name["ttl"].fields.append( - _LIFECYCLECONFIG.fields_by_name["auto_delete_time"] -) -_LIFECYCLECONFIG.fields_by_name[ - "auto_delete_time" -].containing_oneof = _LIFECYCLECONFIG.oneofs_by_name["ttl"] -_LIFECYCLECONFIG.oneofs_by_name["ttl"].fields.append( - _LIFECYCLECONFIG.fields_by_name["auto_delete_ttl"] -) -_LIFECYCLECONFIG.fields_by_name[ - "auto_delete_ttl" -].containing_oneof = _LIFECYCLECONFIG.oneofs_by_name["ttl"] -_SECURITYCONFIG.fields_by_name["kerberos_config"].message_type = _KERBEROSCONFIG -_NODEINITIALIZATIONACTION.fields_by_name[ - "execution_timeout" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_CLUSTERSTATUS.fields_by_name["state"].enum_type = _CLUSTERSTATUS_STATE -_CLUSTERSTATUS.fields_by_name[ - "state_start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CLUSTERSTATUS.fields_by_name["substate"].enum_type = _CLUSTERSTATUS_SUBSTATE -_CLUSTERSTATUS_STATE.containing_type = _CLUSTERSTATUS -_CLUSTERSTATUS_SUBSTATE.containing_type = 
_CLUSTERSTATUS -_SOFTWARECONFIG_PROPERTIESENTRY.containing_type = _SOFTWARECONFIG -_SOFTWARECONFIG.fields_by_name[ - "properties" -].message_type = _SOFTWARECONFIG_PROPERTIESENTRY -_SOFTWARECONFIG.fields_by_name[ - "optional_components" -].enum_type = ( - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_shared__pb2._COMPONENT -) -_CLUSTERMETRICS_HDFSMETRICSENTRY.containing_type = _CLUSTERMETRICS -_CLUSTERMETRICS_YARNMETRICSENTRY.containing_type = _CLUSTERMETRICS -_CLUSTERMETRICS.fields_by_name[ - "hdfs_metrics" -].message_type = _CLUSTERMETRICS_HDFSMETRICSENTRY -_CLUSTERMETRICS.fields_by_name[ - "yarn_metrics" -].message_type = _CLUSTERMETRICS_YARNMETRICSENTRY -_CREATECLUSTERREQUEST.fields_by_name["cluster"].message_type = _CLUSTER -_UPDATECLUSTERREQUEST.fields_by_name["cluster"].message_type = _CLUSTER -_UPDATECLUSTERREQUEST.fields_by_name[ - "graceful_decommission_timeout" -].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_UPDATECLUSTERREQUEST.fields_by_name[ - "update_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_LISTCLUSTERSRESPONSE.fields_by_name["clusters"].message_type = _CLUSTER -_RESERVATIONAFFINITY.fields_by_name[ - "consume_reservation_type" -].enum_type = _RESERVATIONAFFINITY_TYPE -_RESERVATIONAFFINITY_TYPE.containing_type = _RESERVATIONAFFINITY -DESCRIPTOR.message_types_by_name["Cluster"] = _CLUSTER -DESCRIPTOR.message_types_by_name["ClusterConfig"] = _CLUSTERCONFIG -DESCRIPTOR.message_types_by_name["GkeClusterConfig"] = _GKECLUSTERCONFIG -DESCRIPTOR.message_types_by_name["EndpointConfig"] = _ENDPOINTCONFIG -DESCRIPTOR.message_types_by_name["AutoscalingConfig"] = _AUTOSCALINGCONFIG -DESCRIPTOR.message_types_by_name["EncryptionConfig"] = _ENCRYPTIONCONFIG -DESCRIPTOR.message_types_by_name["GceClusterConfig"] = _GCECLUSTERCONFIG -DESCRIPTOR.message_types_by_name["InstanceGroupConfig"] = _INSTANCEGROUPCONFIG -DESCRIPTOR.message_types_by_name["ManagedGroupConfig"] = _MANAGEDGROUPCONFIG -DESCRIPTOR.message_types_by_name["AcceleratorConfig"] = _ACCELERATORCONFIG -DESCRIPTOR.message_types_by_name["DiskConfig"] = _DISKCONFIG -DESCRIPTOR.message_types_by_name["LifecycleConfig"] = _LIFECYCLECONFIG -DESCRIPTOR.message_types_by_name["SecurityConfig"] = _SECURITYCONFIG -DESCRIPTOR.message_types_by_name["KerberosConfig"] = _KERBEROSCONFIG -DESCRIPTOR.message_types_by_name["NodeInitializationAction"] = _NODEINITIALIZATIONACTION -DESCRIPTOR.message_types_by_name["ClusterStatus"] = _CLUSTERSTATUS -DESCRIPTOR.message_types_by_name["SoftwareConfig"] = _SOFTWARECONFIG -DESCRIPTOR.message_types_by_name["ClusterMetrics"] = _CLUSTERMETRICS -DESCRIPTOR.message_types_by_name["CreateClusterRequest"] = _CREATECLUSTERREQUEST -DESCRIPTOR.message_types_by_name["UpdateClusterRequest"] = _UPDATECLUSTERREQUEST -DESCRIPTOR.message_types_by_name["DeleteClusterRequest"] = _DELETECLUSTERREQUEST -DESCRIPTOR.message_types_by_name["GetClusterRequest"] = _GETCLUSTERREQUEST -DESCRIPTOR.message_types_by_name["ListClustersRequest"] = _LISTCLUSTERSREQUEST -DESCRIPTOR.message_types_by_name["ListClustersResponse"] = _LISTCLUSTERSRESPONSE -DESCRIPTOR.message_types_by_name["DiagnoseClusterRequest"] = _DIAGNOSECLUSTERREQUEST -DESCRIPTOR.message_types_by_name["DiagnoseClusterResults"] = _DIAGNOSECLUSTERRESULTS -DESCRIPTOR.message_types_by_name["ReservationAffinity"] = _RESERVATIONAFFINITY -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Cluster = _reflection.GeneratedProtocolMessageType( - "Cluster", - (_message.Message,), - { - "LabelsEntry": 
_reflection.GeneratedProtocolMessageType( - "LabelsEntry", - (_message.Message,), - { - "DESCRIPTOR": _CLUSTER_LABELSENTRY, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.Cluster.LabelsEntry) - }, - ), - "DESCRIPTOR": _CLUSTER, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2", - "__doc__": """Describes the identifying information, config, and status of a cluster - of Compute Engine instances. - - Attributes: - project_id: - Required. The Google Cloud Platform project ID that the - cluster belongs to. - cluster_name: - Required. The cluster name. Cluster names within a project - must be unique. Names of deleted clusters can be reused. - config: - Required. The cluster config. Note that Dataproc may set - default values, and values may change when clusters are - updated. - labels: - Optional. The labels to associate with this cluster. Label - **keys** must contain 1 to 63 characters, and must conform to - `RFC 1035 `__. Label - **values** may be empty, but, if present, must contain 1 to 63 - characters, and must conform to `RFC 1035 - `__. No more than 32 - labels can be associated with a cluster. - status: - Output only. Cluster status. - status_history: - Output only. The previous cluster status. - cluster_uuid: - Output only. A cluster UUID (Unique Universal Identifier). - Dataproc generates this value when it creates the cluster. - metrics: - Output only. Contains cluster daemon metrics such as HDFS and - YARN stats. **Beta Feature**: This report is available for - testing purposes only. It may be changed before final release. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.Cluster) - }, -) -_sym_db.RegisterMessage(Cluster) -_sym_db.RegisterMessage(Cluster.LabelsEntry) - -ClusterConfig = _reflection.GeneratedProtocolMessageType( - "ClusterConfig", - (_message.Message,), - { - "DESCRIPTOR": _CLUSTERCONFIG, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2", - "__doc__": """The cluster config. - - Attributes: - config_bucket: - Optional. A Cloud Storage bucket used to stage job - dependencies, config files, and job driver console output. If - you do not specify a staging bucket, Cloud Dataproc will - determine a Cloud Storage location (US, ASIA, or EU) for your - cluster’s staging bucket according to the Compute Engine zone - where your cluster is deployed, and then create and manage - this project-level, per-location bucket (see `Dataproc staging - bucket - `__). - gce_cluster_config: - Optional. The shared Compute Engine config settings for all - instances in a cluster. - master_config: - Optional. The Compute Engine config settings for the master - instance in a cluster. - worker_config: - Optional. The Compute Engine config settings for worker - instances in a cluster. - secondary_worker_config: - Optional. The Compute Engine config settings for additional - worker instances in a cluster. - software_config: - Optional. The config settings for software inside the cluster. - lifecycle_config: - Optional. The config setting for auto delete cluster schedule. - initialization_actions: - Optional. Commands to execute on each node after config is - completed. By default, executables are run on master and all - worker nodes. 
You can test a node’s role metadata to run an - executable on a master or worker node, as shown below using - ``curl`` (you can also use ``wget``): :: ROLE=$(curl -H - Metadata-Flavor:Google http://metadata/computeMetadata/v1be - ta2/instance/attributes/dataproc-role) if [[ "${ROLE}" == - 'Master' ]]; then ... master specific actions ... else - ... worker specific actions ... fi - encryption_config: - Optional. Encryption settings for the cluster. - autoscaling_config: - Optional. Autoscaling config for the policy associated with - the cluster. Cluster does not autoscale if this field is - unset. - endpoint_config: - Optional. Port/endpoint configuration for this cluster - security_config: - Optional. Security related configuration. - gke_cluster_config: - Optional. The Kubernetes Engine config for Dataproc clusters - deployed to Kubernetes. Setting this is considered mutually - exclusive with Compute Engine-based options such as - ``gce_cluster_config``, ``master_config``, ``worker_config``, - ``secondary_worker_config``, and ``autoscaling_config``. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ClusterConfig) - }, -) -_sym_db.RegisterMessage(ClusterConfig) - -GkeClusterConfig = _reflection.GeneratedProtocolMessageType( - "GkeClusterConfig", - (_message.Message,), - { - "NamespacedGkeDeploymentTarget": _reflection.GeneratedProtocolMessageType( - "NamespacedGkeDeploymentTarget", - (_message.Message,), - { - "DESCRIPTOR": _GKECLUSTERCONFIG_NAMESPACEDGKEDEPLOYMENTTARGET, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2", - "__doc__": """A full, namespace-isolated deployment target for an existing GKE - cluster. - - Attributes: - target_gke_cluster: - Optional. The target GKE cluster to deploy to. Format: ‘projec - ts/{project}/locations/{location}/clusters/{cluster_id}’ - cluster_namespace: - Optional. A namespace within the GKE cluster to deploy into. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.GkeClusterConfig.NamespacedGkeDeploymentTarget) - }, - ), - "DESCRIPTOR": _GKECLUSTERCONFIG, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2", - "__doc__": """The GKE config for this cluster. - - Attributes: - namespaced_gke_deployment_target: - Optional. A target for the deployment. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.GkeClusterConfig) - }, -) -_sym_db.RegisterMessage(GkeClusterConfig) -_sym_db.RegisterMessage(GkeClusterConfig.NamespacedGkeDeploymentTarget) - -EndpointConfig = _reflection.GeneratedProtocolMessageType( - "EndpointConfig", - (_message.Message,), - { - "HttpPortsEntry": _reflection.GeneratedProtocolMessageType( - "HttpPortsEntry", - (_message.Message,), - { - "DESCRIPTOR": _ENDPOINTCONFIG_HTTPPORTSENTRY, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.EndpointConfig.HttpPortsEntry) - }, - ), - "DESCRIPTOR": _ENDPOINTCONFIG, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2", - "__doc__": """Endpoint config for this cluster - - Attributes: - http_ports: - Output only. The map of port descriptions to URLs. Will only - be populated if enable_http_port_access is true. - enable_http_port_access: - Optional. If true, enable http access to specific ports on the - cluster from external sources. Defaults to false. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.EndpointConfig) - }, -) -_sym_db.RegisterMessage(EndpointConfig) -_sym_db.RegisterMessage(EndpointConfig.HttpPortsEntry) - -AutoscalingConfig = _reflection.GeneratedProtocolMessageType( - "AutoscalingConfig", - (_message.Message,), - { - "DESCRIPTOR": _AUTOSCALINGCONFIG, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2", - "__doc__": """Autoscaling Policy config associated with the cluster. - - Attributes: - policy_uri: - Optional. The autoscaling policy used by the cluster. Only - resource names including projectid and location (region) are - valid. Examples: - ``https://www.googleapis.com/compute/v1/p - rojects/[project_id]/locations/[dataproc_region]/autoscalingPo - licies/[policy_id]`` - ``projects/[project_id]/locations/[dat - aproc_region]/autoscalingPolicies/[policy_id]`` Note that the - policy must be in the same project and Dataproc region. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.AutoscalingConfig) - }, -) -_sym_db.RegisterMessage(AutoscalingConfig) - -EncryptionConfig = _reflection.GeneratedProtocolMessageType( - "EncryptionConfig", - (_message.Message,), - { - "DESCRIPTOR": _ENCRYPTIONCONFIG, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2", - "__doc__": """Encryption settings for the cluster. - - Attributes: - gce_pd_kms_key_name: - Optional. The Cloud KMS key name to use for PD disk encryption - for all instances in the cluster. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.EncryptionConfig) - }, -) -_sym_db.RegisterMessage(EncryptionConfig) - -GceClusterConfig = _reflection.GeneratedProtocolMessageType( - "GceClusterConfig", - (_message.Message,), - { - "MetadataEntry": _reflection.GeneratedProtocolMessageType( - "MetadataEntry", - (_message.Message,), - { - "DESCRIPTOR": _GCECLUSTERCONFIG_METADATAENTRY, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.GceClusterConfig.MetadataEntry) - }, - ), - "DESCRIPTOR": _GCECLUSTERCONFIG, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2", - "__doc__": """Common config settings for resources of Compute Engine cluster - instances, applicable to all instances in the cluster. - - Attributes: - zone_uri: - Optional. The zone where the Compute Engine cluster will be - located. On a create request, it is required in the “global” - region. If omitted in a non-global Dataproc region, the - service will pick a zone in the corresponding Compute Engine - region. On a get request, zone will always be present. A full - URL, partial URI, or short name are valid. Examples: - ``htt - ps://www.googleapis.com/compute/v1/projects/[project_id]/zones - /[zone]`` - ``projects/[project_id]/zones/[zone]`` - ``us- - central1-f`` - network_uri: - Optional. The Compute Engine network to be used for machine - communications. Cannot be specified with subnetwork_uri. If - neither ``network_uri`` nor ``subnetwork_uri`` is specified, - the “default” network of the project is used, if it exists. - Cannot be a “Custom Subnet Network” (see `Using Subnetworks - `__ for - more information). A full URL, partial URI, or short name are - valid. Examples: - ``https://www.googleapis.com/compute/v1/p - rojects/[project_id]/regions/global/default`` - - ``projects/[project_id]/regions/global/default`` - - ``default`` - subnetwork_uri: - Optional. 
The Compute Engine subnetwork to be used for machine - communications. Cannot be specified with network_uri. A full - URL, partial URI, or short name are valid. Examples: - ``htt - ps://www.googleapis.com/compute/v1/projects/[project_id]/regio - ns/us-east1/subnetworks/sub0`` - - ``projects/[project_id]/regions/us-east1/subnetworks/sub0`` - - ``sub0`` - internal_ip_only: - Optional. If true, all instances in the cluster will only have - internal IP addresses. By default, clusters are not restricted - to internal IP addresses, and will have ephemeral external IP - addresses assigned to each instance. This ``internal_ip_only`` - restriction can only be enabled for subnetwork enabled - networks, and all off-cluster dependencies must be configured - to be accessible without external IP addresses. - service_account: - Optional. The `Dataproc service account - `__ (also see `VM - Data Plane identity - `__) used by - Dataproc cluster VM instances to access Google Cloud Platform - services. If not specified, the `Compute Engine default - service account - `__ is used. - service_account_scopes: - Optional. The URIs of service account scopes to be included in - Compute Engine instances. The following base set of scopes is - always included: - - https://www.googleapis.com/auth/cloud.useraccounts.readonly - - https://www.googleapis.com/auth/devstorage.read_write - - https://www.googleapis.com/auth/logging.write If no scopes - are specified, the following defaults are also provided: - - https://www.googleapis.com/auth/bigquery - - https://www.googleapis.com/auth/bigtable.admin.table - - https://www.googleapis.com/auth/bigtable.data - - https://www.googleapis.com/auth/devstorage.full_control - tags: - The Compute Engine tags to add to all instances (see `Tagging - instances `__). - metadata: - The Compute Engine metadata entries to add to all instances - (see `Project and instance metadata - `__). - reservation_affinity: - Optional. Reservation Affinity for consuming Zonal - reservation. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.GceClusterConfig) - }, -) -_sym_db.RegisterMessage(GceClusterConfig) -_sym_db.RegisterMessage(GceClusterConfig.MetadataEntry) - -InstanceGroupConfig = _reflection.GeneratedProtocolMessageType( - "InstanceGroupConfig", - (_message.Message,), - { - "DESCRIPTOR": _INSTANCEGROUPCONFIG, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2", - "__doc__": """The config settings for Compute Engine resources in an instance group, - such as a master or worker group. - - Attributes: - num_instances: - Optional. The number of VM instances in the instance group. - For master instance groups, must be set to 1. - instance_names: - Output only. The list of instance names. Dataproc derives the - names from ``cluster_name``, ``num_instances``, and the - instance group. - image_uri: - Optional. The Compute Engine image resource used for cluster - instances. The URI can represent an image or image family. - Image examples: - ``https://www.googleapis.com/compute/beta/ - projects/[project_id]/global/images/[image-id]`` - - ``projects/[project_id]/global/images/[image-id]`` - ``image- - id`` Image family examples. 
Dataproc will use the most recent - image from the family: - ``https://www.googleapis.com/comput - e/beta/projects/[project_id]/global/images/family/[custom- - image-family-name]`` - - ``projects/[project_id]/global/images/family/[custom-image- - family-name]`` If the URI is unspecified, it will be inferred - from ``SoftwareConfig.image_version`` or the system default. - machine_type_uri: - Optional. The Compute Engine machine type used for cluster - instances. A full URL, partial URI, or short name are valid. - Examples: - ``https://www.googleapis.com/compute/v1/projects - /[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`` - - ``projects/[project_id]/zones/us- - east1-a/machineTypes/n1-standard-2`` - ``n1-standard-2`` - **Auto Zone Exception**: If you are using the Dataproc `Auto - Zone Placement - `__ feature, you - must use the short name of the machine type resource, for - example, ``n1-standard-2``. - disk_config: - Optional. Disk option config settings. - is_preemptible: - Output only. Specifies that this instance group contains - preemptible instances. - managed_group_config: - Output only. The config for Compute Engine Instance Group - Manager that manages this group. This is only used for - preemptible instance groups. - accelerators: - Optional. The Compute Engine accelerator configuration for - these instances. - min_cpu_platform: - Specifies the minimum cpu platform for the Instance Group. See - `Dataproc -> Minimum CPU Platform `__. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.InstanceGroupConfig) - }, -) -_sym_db.RegisterMessage(InstanceGroupConfig) - -ManagedGroupConfig = _reflection.GeneratedProtocolMessageType( - "ManagedGroupConfig", - (_message.Message,), - { - "DESCRIPTOR": _MANAGEDGROUPCONFIG, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2", - "__doc__": """Specifies the resources used to actively manage an instance group. - - Attributes: - instance_template_name: - Output only. The name of the Instance Template used for the - Managed Instance Group. - instance_group_manager_name: - Output only. The name of the Instance Group Manager for this - group. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ManagedGroupConfig) - }, -) -_sym_db.RegisterMessage(ManagedGroupConfig) - -AcceleratorConfig = _reflection.GeneratedProtocolMessageType( - "AcceleratorConfig", - (_message.Message,), - { - "DESCRIPTOR": _ACCELERATORCONFIG, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2", - "__doc__": """Specifies the type and number of accelerator cards attached to the - instances of an instance group (see `GPUs on Compute Engine - `__). - - Attributes: - accelerator_type_uri: - Full URL, partial URI, or short name of the accelerator type - resource to expose to this instance. See `Compute Engine - AcceleratorTypes `__ Examples \* ``https://www.googl - eapis.com/compute/beta/projects/[project_id]/zones/us- - east1-a/acceleratorTypes/nvidia-tesla-k80`` \* - ``projects/[project_id]/zones/us- - east1-a/acceleratorTypes/nvidia-tesla-k80`` \* ``nvidia- - tesla-k80`` **Auto Zone Exception**: If you are using the - Dataproc `Auto Zone Placement - `__ feature, you - must use the short name of the accelerator type resource, for - example, ``nvidia-tesla-k80``. - accelerator_count: - The number of the accelerator cards of this type exposed to - this instance. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.AcceleratorConfig) - }, -) -_sym_db.RegisterMessage(AcceleratorConfig) - -DiskConfig = _reflection.GeneratedProtocolMessageType( - "DiskConfig", - (_message.Message,), - { - "DESCRIPTOR": _DISKCONFIG, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2", - "__doc__": """Specifies the config of disk options for a group of VM instances. - - Attributes: - boot_disk_type: - Optional. Type of the boot disk (default is “pd-standard”). - Valid values: “pd-ssd” (Persistent Disk Solid State Drive) or - “pd-standard” (Persistent Disk Hard Disk Drive). - boot_disk_size_gb: - Optional. Size in GB of the boot disk (default is 500GB). - num_local_ssds: - Number of attached SSDs, from 0 to 4 (default is 0). If SSDs - are not attached, the boot disk is used to store runtime logs - and `HDFS `__ data. If one or more SSDs are attached, this - runtime bulk data is spread across them, and the boot disk - contains only basic config and installed binaries. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.DiskConfig) - }, -) -_sym_db.RegisterMessage(DiskConfig) - -LifecycleConfig = _reflection.GeneratedProtocolMessageType( - "LifecycleConfig", - (_message.Message,), - { - "DESCRIPTOR": _LIFECYCLECONFIG, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2", - "__doc__": """Specifies the cluster auto-delete schedule configuration. - - Attributes: - idle_delete_ttl: - Optional. The duration to keep the cluster alive while idling - (when no jobs are running). Passing this threshold will cause - the cluster to be deleted. Minimum value is 10 minutes; - maximum value is 14 days (see JSON representation of `Duration - `__. - ttl: - Either the exact time the cluster should be deleted at or the - cluster maximum age. - auto_delete_time: - Optional. The time when cluster will be auto-deleted. (see - JSON representation of `Timestamp - `__). - auto_delete_ttl: - Optional. The lifetime duration of cluster. The cluster will - be auto-deleted at the end of this period. Minimum value is 10 - minutes; maximum value is 14 days (see JSON representation of - `Duration `__). - idle_start_time: - Output only. The time when cluster became idle (most recent - job finished) and became eligible for deletion due to idleness - (see JSON representation of `Timestamp - `__). - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.LifecycleConfig) - }, -) -_sym_db.RegisterMessage(LifecycleConfig) - -SecurityConfig = _reflection.GeneratedProtocolMessageType( - "SecurityConfig", - (_message.Message,), - { - "DESCRIPTOR": _SECURITYCONFIG, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2", - "__doc__": """Security related configuration, including encryption, Kerberos, etc. - - Attributes: - kerberos_config: - Kerberos related configuration. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.SecurityConfig) - }, -) -_sym_db.RegisterMessage(SecurityConfig) - -KerberosConfig = _reflection.GeneratedProtocolMessageType( - "KerberosConfig", - (_message.Message,), - { - "DESCRIPTOR": _KERBEROSCONFIG, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2", - "__doc__": """Specifies Kerberos related configuration. - - Attributes: - enable_kerberos: - Optional. Flag to indicate whether to Kerberize the cluster - (default: false). Set this field to true to enable Kerberos on - a cluster. - root_principal_password_uri: - Required. 
The Cloud Storage URI of a KMS encrypted file - containing the root principal password. - kms_key_uri: - Required. The uri of the KMS key used to encrypt various - sensitive files. - keystore_uri: - Optional. The Cloud Storage URI of the keystore file used for - SSL encryption. If not provided, Dataproc will provide a self- - signed certificate. - truststore_uri: - Optional. The Cloud Storage URI of the truststore file used - for SSL encryption. If not provided, Dataproc will provide a - self-signed certificate. - keystore_password_uri: - Optional. The Cloud Storage URI of a KMS encrypted file - containing the password to the user provided keystore. For the - self-signed certificate, this password is generated by - Dataproc. - key_password_uri: - Optional. The Cloud Storage URI of a KMS encrypted file - containing the password to the user provided key. For the - self-signed certificate, this password is generated by - Dataproc. - truststore_password_uri: - Optional. The Cloud Storage URI of a KMS encrypted file - containing the password to the user provided truststore. For - the self-signed certificate, this password is generated by - Dataproc. - cross_realm_trust_realm: - Optional. The remote realm the Dataproc on-cluster KDC will - trust, should the user enable cross realm trust. - cross_realm_trust_kdc: - Optional. The KDC (IP or hostname) for the remote trusted - realm in a cross realm trust relationship. - cross_realm_trust_admin_server: - Optional. The admin server (IP or hostname) for the remote - trusted realm in a cross realm trust relationship. - cross_realm_trust_shared_password_uri: - Optional. The Cloud Storage URI of a KMS encrypted file - containing the shared password between the on-cluster Kerberos - realm and the remote trusted realm, in a cross realm trust - relationship. - kdc_db_key_uri: - Optional. The Cloud Storage URI of a KMS encrypted file - containing the master key of the KDC database. - tgt_lifetime_hours: - Optional. The lifetime of the ticket granting ticket, in - hours. If not specified, or user specifies 0, then default - value 10 will be used. - realm: - Optional. The name of the on-cluster Kerberos realm. If not - specified, the uppercased domain of hostnames will be the - realm. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.KerberosConfig) - }, -) -_sym_db.RegisterMessage(KerberosConfig) - -NodeInitializationAction = _reflection.GeneratedProtocolMessageType( - "NodeInitializationAction", - (_message.Message,), - { - "DESCRIPTOR": _NODEINITIALIZATIONACTION, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2", - "__doc__": """Specifies an executable to run on a fully configured node and a - timeout period for executable completion. - - Attributes: - executable_file: - Required. Cloud Storage URI of executable file. - execution_timeout: - Optional. Amount of time executable has to complete. Default - is 10 minutes (see JSON representation of `Duration - `__). Cluster creation fails with an - explanatory error message (the name of the executable that - caused the error and the exceeded timeout period) if the - executable is not completed at end of the timeout period. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.NodeInitializationAction) - }, -) -_sym_db.RegisterMessage(NodeInitializationAction) - -ClusterStatus = _reflection.GeneratedProtocolMessageType( - "ClusterStatus", - (_message.Message,), - { - "DESCRIPTOR": _CLUSTERSTATUS, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2", - "__doc__": """The status of a cluster and its instances. - - Attributes: - state: - Output only. The cluster’s state. - detail: - Output only. Optional details of cluster’s state. - state_start_time: - Output only. Time when this state was entered (see JSON - representation of `Timestamp - `__). - substate: - Output only. Additional state information that includes status - reported by the agent. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ClusterStatus) - }, -) -_sym_db.RegisterMessage(ClusterStatus) - -SoftwareConfig = _reflection.GeneratedProtocolMessageType( - "SoftwareConfig", - (_message.Message,), - { - "PropertiesEntry": _reflection.GeneratedProtocolMessageType( - "PropertiesEntry", - (_message.Message,), - { - "DESCRIPTOR": _SOFTWARECONFIG_PROPERTIESENTRY, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.SoftwareConfig.PropertiesEntry) - }, - ), - "DESCRIPTOR": _SOFTWARECONFIG, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2", - "__doc__": """Specifies the selection and config of software inside the cluster. - - Attributes: - image_version: - Optional. The version of software inside the cluster. It must - be one of the supported `Dataproc Versions `__, such as “1.2” - (including a subminor version, such as “1.2.29”), or the - `“preview” version `__. If - unspecified, it defaults to the latest Debian version. - properties: - Optional. The properties to set on daemon config files. - Property keys are specified in ``prefix:property`` format, for - example ``core:hadoop.tmp.dir``. The following are supported - prefixes and their mappings: - capacity-scheduler: - ``capacity-scheduler.xml`` - core: ``core-site.xml`` - - distcp: ``distcp-default.xml`` - hdfs: ``hdfs-site.xml`` - - hive: ``hive-site.xml`` - mapred: ``mapred-site.xml`` - pig: - ``pig.properties`` - spark: ``spark-defaults.conf`` - yarn: - ``yarn-site.xml`` For more information, see `Cluster - properties - `__. - optional_components: - The set of optional components to activate on the cluster. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.SoftwareConfig) - }, -) -_sym_db.RegisterMessage(SoftwareConfig) -_sym_db.RegisterMessage(SoftwareConfig.PropertiesEntry) - -ClusterMetrics = _reflection.GeneratedProtocolMessageType( - "ClusterMetrics", - (_message.Message,), - { - "HdfsMetricsEntry": _reflection.GeneratedProtocolMessageType( - "HdfsMetricsEntry", - (_message.Message,), - { - "DESCRIPTOR": _CLUSTERMETRICS_HDFSMETRICSENTRY, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ClusterMetrics.HdfsMetricsEntry) - }, - ), - "YarnMetricsEntry": _reflection.GeneratedProtocolMessageType( - "YarnMetricsEntry", - (_message.Message,), - { - "DESCRIPTOR": _CLUSTERMETRICS_YARNMETRICSENTRY, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ClusterMetrics.YarnMetricsEntry) - }, - ), - "DESCRIPTOR": _CLUSTERMETRICS, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2", - "__doc__": """Contains cluster daemon metrics, such as HDFS and YARN stats. **Beta - Feature**: This report is available for testing purposes only. It may - be changed before final release. - - Attributes: - hdfs_metrics: - The HDFS metrics. - yarn_metrics: - The YARN metrics. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ClusterMetrics) - }, -) -_sym_db.RegisterMessage(ClusterMetrics) -_sym_db.RegisterMessage(ClusterMetrics.HdfsMetricsEntry) -_sym_db.RegisterMessage(ClusterMetrics.YarnMetricsEntry) - -CreateClusterRequest = _reflection.GeneratedProtocolMessageType( - "CreateClusterRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATECLUSTERREQUEST, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2", - "__doc__": """A request to create a cluster. - - Attributes: - project_id: - Required. The ID of the Google Cloud Platform project that the - cluster belongs to. - region: - Required. The Dataproc region in which to handle the request. - cluster: - Required. The cluster to create. - request_id: - Optional. A unique id used to identify the request. If the - server receives two [CreateClusterRequest][google.cloud.datapr - oc.v1beta2.CreateClusterRequest] requests with the same id, - then the second request will be ignored and the first - [google.longrunning.Operation][google.longrunning.Operation] - created and stored in the backend is returned. It is - recommended to always set this value to a `UUID `__. The id - must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). The maximum length is 40 - characters. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.CreateClusterRequest) - }, -) -_sym_db.RegisterMessage(CreateClusterRequest) - -UpdateClusterRequest = _reflection.GeneratedProtocolMessageType( - "UpdateClusterRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATECLUSTERREQUEST, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2", - "__doc__": """A request to update a cluster. - - Attributes: - project_id: - Required. The ID of the Google Cloud Platform project the - cluster belongs to. - region: - Required. The Dataproc region in which to handle the request. - cluster_name: - Required. The cluster name. - cluster: - Required. The changes to the cluster. - graceful_decommission_timeout: - Optional. Timeout for graceful YARN decomissioning. 
-            Graceful decommissioning allows removing nodes from the cluster
-            without interrupting jobs in progress. Timeout specifies how long to
-            wait for jobs in progress to finish before forcefully removing nodes
-            (and potentially interrupting jobs). Default timeout is 0 (for
-            forceful decommission), and the maximum allowed timeout is 1 day
-            (see JSON representation of `Duration `__). Only supported on
-            Dataproc image versions 1.2 and higher.
-        update_mask:
-            Required. Specifies the path, relative to ``Cluster``, of the field
-            to update. For example, to change the number of workers in a cluster
-            to 5, the ``update_mask`` parameter would be specified as
-            ``config.worker_config.num_instances``, and the ``PATCH`` request
-            body would specify the new value, as follows: ::
-
-                { "config": { "workerConfig": { "numInstances": "5" } } }
-
-            Similarly, to change the number of preemptible workers in a cluster
-            to 5, the ``update_mask`` parameter would be
-            ``config.secondary_worker_config.num_instances``, and the ``PATCH``
-            request body would be set as follows: ::
-
-                { "config": { "secondaryWorkerConfig": { "numInstances": "5" } } }
-
-            Note: currently only the following fields can be updated:
-
-            =============================================  ================================================
-            Mask                                           Purpose
-            =============================================  ================================================
-            labels                                         Updates labels
-            config.worker_config.num_instances             Resize primary worker group
-            config.secondary_worker_config.num_instances   Resize secondary worker group
-            config.lifecycle_config.auto_delete_ttl        Reset MAX TTL duration
-            config.lifecycle_config.auto_delete_time       Update MAX TTL deletion timestamp
-            config.lifecycle_config.idle_delete_ttl        Update Idle TTL duration
-            config.autoscaling_config.policy_uri           Use, stop using, or change autoscaling policies
-            =============================================  ================================================
- request_id: - Optional. A unique id used to identify the request. If the - server receives two [UpdateClusterRequest][google.cloud.datapr - oc.v1beta2.UpdateClusterRequest] requests with the same id, - then the second request will be ignored and the first - [google.longrunning.Operation][google.longrunning.Operation] - created and stored in the backend is returned. It is - recommended to always set this value to a `UUID `__. The id - must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). The maximum length is 40 - characters. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.UpdateClusterRequest) - }, -) -_sym_db.RegisterMessage(UpdateClusterRequest) - -DeleteClusterRequest = _reflection.GeneratedProtocolMessageType( - "DeleteClusterRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETECLUSTERREQUEST, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2", - "__doc__": """A request to delete a cluster. - - Attributes: - project_id: - Required. The ID of the Google Cloud Platform project that the - cluster belongs to. - region: - Required. The Dataproc region in which to handle the request. - cluster_name: - Required. The cluster name. - cluster_uuid: - Optional. Specifying the ``cluster_uuid`` means the RPC should - fail (with error NOT_FOUND) if cluster with specified UUID - does not exist. - request_id: - Optional. A unique id used to identify the request. If the - server receives two [DeleteClusterRequest][google.cloud.datapr - oc.v1beta2.DeleteClusterRequest] requests with the same id, - then the second request will be ignored and the first - [google.longrunning.Operation][google.longrunning.Operation] - created and stored in the backend is returned. It is - recommended to always set this value to a `UUID `__. The id - must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). The maximum length is 40 - characters. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.DeleteClusterRequest) - }, -) -_sym_db.RegisterMessage(DeleteClusterRequest) - -GetClusterRequest = _reflection.GeneratedProtocolMessageType( - "GetClusterRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETCLUSTERREQUEST, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2", - "__doc__": """Request to get the resource representation for a cluster in a project. - - Attributes: - project_id: - Required. The ID of the Google Cloud Platform project that the - cluster belongs to. - region: - Required. The Dataproc region in which to handle the request. - cluster_name: - Required. The cluster name. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.GetClusterRequest) - }, -) -_sym_db.RegisterMessage(GetClusterRequest) - -ListClustersRequest = _reflection.GeneratedProtocolMessageType( - "ListClustersRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTCLUSTERSREQUEST, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2", - "__doc__": """A request to list the clusters in a project. - - Attributes: - project_id: - Required. The ID of the Google Cloud Platform project that the - cluster belongs to. - region: - Required. The Dataproc region in which to handle the request. - filter: - Optional. A filter constraining the clusters to list. 
Filters - are case-sensitive and have the following syntax: field = - value [AND [field = value]] … where **field** is one of - ``status.state``, ``clusterName``, or ``labels.[KEY]``, and - ``[KEY]`` is a label key. **value** can be ``*`` to match all - values. ``status.state`` can be one of the following: - ``ACTIVE``, ``INACTIVE``, ``CREATING``, ``RUNNING``, - ``ERROR``, ``DELETING``, or ``UPDATING``. ``ACTIVE`` contains - the ``CREATING``, ``UPDATING``, and ``RUNNING`` states. - ``INACTIVE`` contains the ``DELETING`` and ``ERROR`` states. - ``clusterName`` is the name of the cluster provided at - creation time. Only the logical ``AND`` operator is supported; - space-separated items are treated as having an implicit - ``AND`` operator. Example filter: status.state = ACTIVE AND - clusterName = mycluster AND labels.env = staging AND - labels.starred = \* - page_size: - Optional. The standard List page size. - page_token: - Optional. The standard List page token. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ListClustersRequest) - }, -) -_sym_db.RegisterMessage(ListClustersRequest) - -ListClustersResponse = _reflection.GeneratedProtocolMessageType( - "ListClustersResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTCLUSTERSRESPONSE, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2", - "__doc__": """The list of all clusters in a project. - - Attributes: - clusters: - Output only. The clusters in the project. - next_page_token: - Output only. This token is included in the response if there - are more results to fetch. To fetch additional results, - provide this value as the ``page_token`` in a subsequent - ListClustersRequest. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ListClustersResponse) - }, -) -_sym_db.RegisterMessage(ListClustersResponse) - -DiagnoseClusterRequest = _reflection.GeneratedProtocolMessageType( - "DiagnoseClusterRequest", - (_message.Message,), - { - "DESCRIPTOR": _DIAGNOSECLUSTERREQUEST, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2", - "__doc__": """A request to collect cluster diagnostic information. - - Attributes: - project_id: - Required. The ID of the Google Cloud Platform project that the - cluster belongs to. - region: - Required. The Dataproc region in which to handle the request. - cluster_name: - Required. The cluster name. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.DiagnoseClusterRequest) - }, -) -_sym_db.RegisterMessage(DiagnoseClusterRequest) - -DiagnoseClusterResults = _reflection.GeneratedProtocolMessageType( - "DiagnoseClusterResults", - (_message.Message,), - { - "DESCRIPTOR": _DIAGNOSECLUSTERRESULTS, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2", - "__doc__": """The location of diagnostic output. - - Attributes: - output_uri: - Output only. The Cloud Storage URI of the diagnostic output. - The output report is a plain text file with a summary of - collected diagnostics. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.DiagnoseClusterResults) - }, -) -_sym_db.RegisterMessage(DiagnoseClusterResults) - -ReservationAffinity = _reflection.GeneratedProtocolMessageType( - "ReservationAffinity", - (_message.Message,), - { - "DESCRIPTOR": _RESERVATIONAFFINITY, - "__module__": "google.cloud.dataproc_v1beta2.proto.clusters_pb2", - "__doc__": """Reservation Affinity for consuming Zonal reservation. - - Attributes: - consume_reservation_type: - Optional. 
Type of reservation to consume - key: - Optional. Corresponds to the label key of reservation - resource. - values: - Optional. Corresponds to the label values of reservation - resource. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ReservationAffinity) - }, -) -_sym_db.RegisterMessage(ReservationAffinity) - - -DESCRIPTOR._options = None -_CLUSTER_LABELSENTRY._options = None -_CLUSTER.fields_by_name["project_id"]._options = None -_CLUSTER.fields_by_name["cluster_name"]._options = None -_CLUSTER.fields_by_name["config"]._options = None -_CLUSTER.fields_by_name["labels"]._options = None -_CLUSTER.fields_by_name["status"]._options = None -_CLUSTER.fields_by_name["status_history"]._options = None -_CLUSTER.fields_by_name["cluster_uuid"]._options = None -_CLUSTER.fields_by_name["metrics"]._options = None -_CLUSTERCONFIG.fields_by_name["config_bucket"]._options = None -_CLUSTERCONFIG.fields_by_name["gce_cluster_config"]._options = None -_CLUSTERCONFIG.fields_by_name["master_config"]._options = None -_CLUSTERCONFIG.fields_by_name["worker_config"]._options = None -_CLUSTERCONFIG.fields_by_name["secondary_worker_config"]._options = None -_CLUSTERCONFIG.fields_by_name["software_config"]._options = None -_CLUSTERCONFIG.fields_by_name["lifecycle_config"]._options = None -_CLUSTERCONFIG.fields_by_name["initialization_actions"]._options = None -_CLUSTERCONFIG.fields_by_name["encryption_config"]._options = None -_CLUSTERCONFIG.fields_by_name["autoscaling_config"]._options = None -_CLUSTERCONFIG.fields_by_name["endpoint_config"]._options = None -_CLUSTERCONFIG.fields_by_name["security_config"]._options = None -_CLUSTERCONFIG.fields_by_name["gke_cluster_config"]._options = None -_GKECLUSTERCONFIG_NAMESPACEDGKEDEPLOYMENTTARGET.fields_by_name[ - "target_gke_cluster" -]._options = None -_GKECLUSTERCONFIG_NAMESPACEDGKEDEPLOYMENTTARGET.fields_by_name[ - "cluster_namespace" -]._options = None -_GKECLUSTERCONFIG.fields_by_name["namespaced_gke_deployment_target"]._options = None -_ENDPOINTCONFIG_HTTPPORTSENTRY._options = None -_ENDPOINTCONFIG.fields_by_name["http_ports"]._options = None -_ENDPOINTCONFIG.fields_by_name["enable_http_port_access"]._options = None -_AUTOSCALINGCONFIG.fields_by_name["policy_uri"]._options = None -_ENCRYPTIONCONFIG.fields_by_name["gce_pd_kms_key_name"]._options = None -_GCECLUSTERCONFIG_METADATAENTRY._options = None -_GCECLUSTERCONFIG.fields_by_name["zone_uri"]._options = None -_GCECLUSTERCONFIG.fields_by_name["network_uri"]._options = None -_GCECLUSTERCONFIG.fields_by_name["subnetwork_uri"]._options = None -_GCECLUSTERCONFIG.fields_by_name["internal_ip_only"]._options = None -_GCECLUSTERCONFIG.fields_by_name["service_account"]._options = None -_GCECLUSTERCONFIG.fields_by_name["service_account_scopes"]._options = None -_GCECLUSTERCONFIG.fields_by_name["reservation_affinity"]._options = None -_INSTANCEGROUPCONFIG.fields_by_name["num_instances"]._options = None -_INSTANCEGROUPCONFIG.fields_by_name["instance_names"]._options = None -_INSTANCEGROUPCONFIG.fields_by_name["image_uri"]._options = None -_INSTANCEGROUPCONFIG.fields_by_name["machine_type_uri"]._options = None -_INSTANCEGROUPCONFIG.fields_by_name["disk_config"]._options = None -_INSTANCEGROUPCONFIG.fields_by_name["is_preemptible"]._options = None -_INSTANCEGROUPCONFIG.fields_by_name["managed_group_config"]._options = None -_INSTANCEGROUPCONFIG.fields_by_name["accelerators"]._options = None -_MANAGEDGROUPCONFIG.fields_by_name["instance_template_name"]._options = None 
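The ``update_mask`` mechanics described in the UpdateClusterRequest docstring above are easier to follow as a concrete request. The sketch below is illustrative only: it builds the message with the pb2 surface this patch removes, the project, region, and cluster names are placeholders, and sending the request would additionally require an authenticated client or channel.

# Sketch only: an UpdateClusterRequest that resizes the primary worker
# group to 5 instances. All identifiers are placeholders.
from google.protobuf import field_mask_pb2
from google.cloud.dataproc_v1beta2.proto import clusters_pb2

request = clusters_pb2.UpdateClusterRequest(
    project_id="my-project",        # placeholder
    region="us-central1",           # placeholder
    cluster_name="my-cluster",      # placeholder
    # The partial Cluster carries only the new value...
    cluster=clusters_pb2.Cluster(
        config=clusters_pb2.ClusterConfig(
            worker_config=clusters_pb2.InstanceGroupConfig(num_instances=5)
        )
    ),
    # ...and the mask names the single field being changed (see the table above).
    update_mask=field_mask_pb2.FieldMask(
        paths=["config.worker_config.num_instances"]
    ),
)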
-_MANAGEDGROUPCONFIG.fields_by_name["instance_group_manager_name"]._options = None -_DISKCONFIG.fields_by_name["boot_disk_type"]._options = None -_DISKCONFIG.fields_by_name["boot_disk_size_gb"]._options = None -_LIFECYCLECONFIG.fields_by_name["idle_delete_ttl"]._options = None -_LIFECYCLECONFIG.fields_by_name["auto_delete_time"]._options = None -_LIFECYCLECONFIG.fields_by_name["auto_delete_ttl"]._options = None -_LIFECYCLECONFIG.fields_by_name["idle_start_time"]._options = None -_KERBEROSCONFIG.fields_by_name["enable_kerberos"]._options = None -_KERBEROSCONFIG.fields_by_name["root_principal_password_uri"]._options = None -_KERBEROSCONFIG.fields_by_name["kms_key_uri"]._options = None -_KERBEROSCONFIG.fields_by_name["keystore_uri"]._options = None -_KERBEROSCONFIG.fields_by_name["truststore_uri"]._options = None -_KERBEROSCONFIG.fields_by_name["keystore_password_uri"]._options = None -_KERBEROSCONFIG.fields_by_name["key_password_uri"]._options = None -_KERBEROSCONFIG.fields_by_name["truststore_password_uri"]._options = None -_KERBEROSCONFIG.fields_by_name["cross_realm_trust_realm"]._options = None -_KERBEROSCONFIG.fields_by_name["cross_realm_trust_kdc"]._options = None -_KERBEROSCONFIG.fields_by_name["cross_realm_trust_admin_server"]._options = None -_KERBEROSCONFIG.fields_by_name["cross_realm_trust_shared_password_uri"]._options = None -_KERBEROSCONFIG.fields_by_name["kdc_db_key_uri"]._options = None -_KERBEROSCONFIG.fields_by_name["tgt_lifetime_hours"]._options = None -_KERBEROSCONFIG.fields_by_name["realm"]._options = None -_NODEINITIALIZATIONACTION.fields_by_name["executable_file"]._options = None -_NODEINITIALIZATIONACTION.fields_by_name["execution_timeout"]._options = None -_CLUSTERSTATUS.fields_by_name["state"]._options = None -_CLUSTERSTATUS.fields_by_name["detail"]._options = None -_CLUSTERSTATUS.fields_by_name["state_start_time"]._options = None -_CLUSTERSTATUS.fields_by_name["substate"]._options = None -_SOFTWARECONFIG_PROPERTIESENTRY._options = None -_SOFTWARECONFIG.fields_by_name["image_version"]._options = None -_SOFTWARECONFIG.fields_by_name["properties"]._options = None -_CLUSTERMETRICS_HDFSMETRICSENTRY._options = None -_CLUSTERMETRICS_YARNMETRICSENTRY._options = None -_CREATECLUSTERREQUEST.fields_by_name["project_id"]._options = None -_CREATECLUSTERREQUEST.fields_by_name["region"]._options = None -_CREATECLUSTERREQUEST.fields_by_name["cluster"]._options = None -_CREATECLUSTERREQUEST.fields_by_name["request_id"]._options = None -_UPDATECLUSTERREQUEST.fields_by_name["project_id"]._options = None -_UPDATECLUSTERREQUEST.fields_by_name["region"]._options = None -_UPDATECLUSTERREQUEST.fields_by_name["cluster_name"]._options = None -_UPDATECLUSTERREQUEST.fields_by_name["cluster"]._options = None -_UPDATECLUSTERREQUEST.fields_by_name["graceful_decommission_timeout"]._options = None -_UPDATECLUSTERREQUEST.fields_by_name["update_mask"]._options = None -_UPDATECLUSTERREQUEST.fields_by_name["request_id"]._options = None -_DELETECLUSTERREQUEST.fields_by_name["project_id"]._options = None -_DELETECLUSTERREQUEST.fields_by_name["region"]._options = None -_DELETECLUSTERREQUEST.fields_by_name["cluster_name"]._options = None -_DELETECLUSTERREQUEST.fields_by_name["cluster_uuid"]._options = None -_DELETECLUSTERREQUEST.fields_by_name["request_id"]._options = None -_GETCLUSTERREQUEST.fields_by_name["project_id"]._options = None -_GETCLUSTERREQUEST.fields_by_name["region"]._options = None -_GETCLUSTERREQUEST.fields_by_name["cluster_name"]._options = None 
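The filter grammar documented for ListClustersRequest above reads more clearly with an example. This is a minimal sketch against the same pb2 module being removed here, with placeholder identifiers; a paginated caller would feed ``next_page_token`` from each ListClustersResponse back in as the next request's ``page_token``.

# Sketch only: list ACTIVE clusters labelled env=staging, 50 per page.
from google.cloud.dataproc_v1beta2.proto import clusters_pb2

request = clusters_pb2.ListClustersRequest(
    project_id="my-project",   # placeholder
    region="us-central1",      # placeholder
    # Only the logical AND is supported; ACTIVE covers the CREATING,
    # UPDATING, and RUNNING states.
    filter="status.state = ACTIVE AND labels.env = staging",
    page_size=50,
)
# Copy response.next_page_token into request.page_token and repeat the
# call until the token comes back empty to walk every page.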
-_LISTCLUSTERSREQUEST.fields_by_name["project_id"]._options = None -_LISTCLUSTERSREQUEST.fields_by_name["region"]._options = None -_LISTCLUSTERSREQUEST.fields_by_name["filter"]._options = None -_LISTCLUSTERSREQUEST.fields_by_name["page_size"]._options = None -_LISTCLUSTERSREQUEST.fields_by_name["page_token"]._options = None -_LISTCLUSTERSRESPONSE.fields_by_name["clusters"]._options = None -_LISTCLUSTERSRESPONSE.fields_by_name["next_page_token"]._options = None -_DIAGNOSECLUSTERREQUEST.fields_by_name["project_id"]._options = None -_DIAGNOSECLUSTERREQUEST.fields_by_name["region"]._options = None -_DIAGNOSECLUSTERREQUEST.fields_by_name["cluster_name"]._options = None -_DIAGNOSECLUSTERRESULTS.fields_by_name["output_uri"]._options = None -_RESERVATIONAFFINITY.fields_by_name["consume_reservation_type"]._options = None -_RESERVATIONAFFINITY.fields_by_name["key"]._options = None -_RESERVATIONAFFINITY.fields_by_name["values"]._options = None - -_CLUSTERCONTROLLER = _descriptor.ServiceDescriptor( - name="ClusterController", - full_name="google.cloud.dataproc.v1beta2.ClusterController", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\027dataproc.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", - create_key=_descriptor._internal_create_key, - serialized_start=6976, - serialized_end=8743, - methods=[ - _descriptor.MethodDescriptor( - name="CreateCluster", - full_name="google.cloud.dataproc.v1beta2.ClusterController.CreateCluster", - index=0, - containing_service=None, - input_type=_CREATECLUSTERREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\002C"8/v1beta2/projects/{project_id}/regions/{region}/clusters:\007cluster\332A\033project_id, region, cluster\312AA\n\007Cluster\0226google.cloud.dataproc.v1beta2.ClusterOperationMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="UpdateCluster", - full_name="google.cloud.dataproc.v1beta2.ClusterController.UpdateCluster", - index=1, - containing_service=None, - input_type=_UPDATECLUSTERREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b"\202\323\344\223\002R2G/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}:\007cluster\332A6project_id, region, cluster_name, cluster, update_mask\312AA\n\007Cluster\0226google.cloud.dataproc.v1beta2.ClusterOperationMetadata", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeleteCluster", - full_name="google.cloud.dataproc.v1beta2.ClusterController.DeleteCluster", - index=2, - containing_service=None, - input_type=_DELETECLUSTERREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b"\202\323\344\223\002I*G/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}\332A project_id, region, cluster_name\312AO\n\025google.protobuf.Empty\0226google.cloud.dataproc.v1beta2.ClusterOperationMetadata", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetCluster", - full_name="google.cloud.dataproc.v1beta2.ClusterController.GetCluster", - index=3, - containing_service=None, - input_type=_GETCLUSTERREQUEST, - output_type=_CLUSTER, - serialized_options=b"\202\323\344\223\002I\022G/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}\332A project_id, region, cluster_name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - 
name="ListClusters", - full_name="google.cloud.dataproc.v1beta2.ClusterController.ListClusters", - index=4, - containing_service=None, - input_type=_LISTCLUSTERSREQUEST, - output_type=_LISTCLUSTERSRESPONSE, - serialized_options=b"\202\323\344\223\002:\0228/v1beta2/projects/{project_id}/regions/{region}/clusters\332A\022project_id, region\332A\032project_id, region, filter", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DiagnoseCluster", - full_name="google.cloud.dataproc.v1beta2.ClusterController.DiagnoseCluster", - index=5, - containing_service=None, - input_type=_DIAGNOSECLUSTERREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\002U"P/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}:diagnose:\001*\332A project_id, region, cluster_name\312AO\n\025google.protobuf.Empty\0226google.cloud.dataproc.v1beta2.ClusterOperationMetadata', - create_key=_descriptor._internal_create_key, - ), - ], -) -_sym_db.RegisterServiceDescriptor(_CLUSTERCONTROLLER) - -DESCRIPTOR.services_by_name["ClusterController"] = _CLUSTERCONTROLLER - -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/dataproc_v1beta2/proto/clusters_pb2_grpc.py b/google/cloud/dataproc_v1beta2/proto/clusters_pb2_grpc.py deleted file mode 100644 index e5b15eec..00000000 --- a/google/cloud/dataproc_v1beta2/proto/clusters_pb2_grpc.py +++ /dev/null @@ -1,321 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -from google.cloud.dataproc_v1beta2.proto import ( - clusters_pb2 as google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2, -) -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) - - -class ClusterControllerStub(object): - """The ClusterControllerService provides methods to manage clusters - of Compute Engine instances. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.CreateCluster = channel.unary_unary( - "/google.cloud.dataproc.v1beta2.ClusterController/CreateCluster", - request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.CreateClusterRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.UpdateCluster = channel.unary_unary( - "/google.cloud.dataproc.v1beta2.ClusterController/UpdateCluster", - request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.UpdateClusterRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.DeleteCluster = channel.unary_unary( - "/google.cloud.dataproc.v1beta2.ClusterController/DeleteCluster", - request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.DeleteClusterRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetCluster = channel.unary_unary( - "/google.cloud.dataproc.v1beta2.ClusterController/GetCluster", - request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.GetClusterRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.Cluster.FromString, - ) - self.ListClusters = channel.unary_unary( - "/google.cloud.dataproc.v1beta2.ClusterController/ListClusters", - request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.ListClustersRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.ListClustersResponse.FromString, - ) - self.DiagnoseCluster = channel.unary_unary( - "/google.cloud.dataproc.v1beta2.ClusterController/DiagnoseCluster", - request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.DiagnoseClusterRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - - -class ClusterControllerServicer(object): - """The ClusterControllerService provides methods to manage clusters - of Compute Engine instances. - """ - - def CreateCluster(self, request, context): - """Creates a cluster in a project. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will be - [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata). - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateCluster(self, request, context): - """Updates a cluster in a project. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will be - [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata). - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteCluster(self, request, context): - """Deletes a cluster in a project. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will be - [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata). 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetCluster(self, request, context): - """Gets the resource representation for a cluster in a project. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListClusters(self, request, context): - """Lists all regions/{region}/clusters in a project alphabetically. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DiagnoseCluster(self, request, context): - """Gets cluster diagnostic information. The returned - [Operation.metadata][google.longrunning.Operation.metadata] will be - [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata). - After the operation completes, - [Operation.response][google.longrunning.Operation.response] - contains - [Empty][google.protobuf.Empty]. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_ClusterControllerServicer_to_server(servicer, server): - rpc_method_handlers = { - "CreateCluster": grpc.unary_unary_rpc_method_handler( - servicer.CreateCluster, - request_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.CreateClusterRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "UpdateCluster": grpc.unary_unary_rpc_method_handler( - servicer.UpdateCluster, - request_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.UpdateClusterRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "DeleteCluster": grpc.unary_unary_rpc_method_handler( - servicer.DeleteCluster, - request_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.DeleteClusterRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetCluster": grpc.unary_unary_rpc_method_handler( - servicer.GetCluster, - request_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.GetClusterRequest.FromString, - response_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.Cluster.SerializeToString, - ), - "ListClusters": grpc.unary_unary_rpc_method_handler( - servicer.ListClusters, - request_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.ListClustersRequest.FromString, - response_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.ListClustersResponse.SerializeToString, - ), - "DiagnoseCluster": grpc.unary_unary_rpc_method_handler( - servicer.DiagnoseCluster, - request_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.DiagnoseClusterRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.cloud.dataproc.v1beta2.ClusterController", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) - - -# This class is part of an EXPERIMENTAL API. 
-class ClusterController(object): - """The ClusterControllerService provides methods to manage clusters - of Compute Engine instances. - """ - - @staticmethod - def CreateCluster( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1beta2.ClusterController/CreateCluster", - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.CreateClusterRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def UpdateCluster( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1beta2.ClusterController/UpdateCluster", - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.UpdateClusterRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DeleteCluster( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1beta2.ClusterController/DeleteCluster", - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.DeleteClusterRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetCluster( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1beta2.ClusterController/GetCluster", - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.GetClusterRequest.SerializeToString, - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.Cluster.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ListClusters( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1beta2.ClusterController/ListClusters", - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.ListClustersRequest.SerializeToString, - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.ListClustersResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DiagnoseCluster( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return 
grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1beta2.ClusterController/DiagnoseCluster", - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.DiagnoseClusterRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) diff --git a/google/cloud/dataproc_v1beta2/proto/jobs_pb2.py b/google/cloud/dataproc_v1beta2/proto/jobs_pb2.py deleted file mode 100644 index c8affffc..00000000 --- a/google/cloud/dataproc_v1beta2/proto/jobs_pb2.py +++ /dev/null @@ -1,5425 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/dataproc_v1beta2/proto/jobs.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/dataproc_v1beta2/proto/jobs.proto", - package="google.cloud.dataproc.v1beta2", - syntax="proto3", - serialized_options=b"\n!com.google.cloud.dataproc.v1beta2B\tJobsProtoP\001ZEgoogle.golang.org/genproto/googleapis/cloud/dataproc/v1beta2;dataproc", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n.google/cloud/dataproc_v1beta2/proto/jobs.proto\x12\x1dgoogle.cloud.dataproc.v1beta2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xcb\x02\n\rLoggingConfig\x12\\\n\x11\x64river_log_levels\x18\x02 \x03(\x0b\x32\x41.google.cloud.dataproc.v1beta2.LoggingConfig.DriverLogLevelsEntry\x1aj\n\x14\x44riverLogLevelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x41\n\x05value\x18\x02 \x01(\x0e\x32\x32.google.cloud.dataproc.v1beta2.LoggingConfig.Level:\x02\x38\x01"p\n\x05Level\x12\x15\n\x11LEVEL_UNSPECIFIED\x10\x00\x12\x07\n\x03\x41LL\x10\x01\x12\t\n\x05TRACE\x10\x02\x12\t\n\x05\x44\x45\x42UG\x10\x03\x12\x08\n\x04INFO\x10\x04\x12\x08\n\x04WARN\x10\x05\x12\t\n\x05\x45RROR\x10\x06\x12\t\n\x05\x46\x41TAL\x10\x07\x12\x07\n\x03OFF\x10\x08"\xfb\x02\n\tHadoopJob\x12\x1b\n\x11main_jar_file_uri\x18\x01 \x01(\tH\x00\x12\x14\n\nmain_class\x18\x02 \x01(\tH\x00\x12\x11\n\x04\x61rgs\x18\x03 \x03(\tB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x04 \x03(\tB\x03\xe0\x41\x01\x12\x16\n\tfile_uris\x18\x05 \x03(\tB\x03\xe0\x41\x01\x12\x19\n\x0c\x61rchive_uris\x18\x06 \x03(\tB\x03\xe0\x41\x01\x12Q\n\nproperties\x18\x07 \x03(\x0b\x32\x38.google.cloud.dataproc.v1beta2.HadoopJob.PropertiesEntryB\x03\xe0\x41\x01\x12I\n\x0elogging_config\x18\x08 
\x01(\x0b\x32,.google.cloud.dataproc.v1beta2.LoggingConfigB\x03\xe0\x41\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x08\n\x06\x64river"\xf9\x02\n\x08SparkJob\x12\x1b\n\x11main_jar_file_uri\x18\x01 \x01(\tH\x00\x12\x14\n\nmain_class\x18\x02 \x01(\tH\x00\x12\x11\n\x04\x61rgs\x18\x03 \x03(\tB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x04 \x03(\tB\x03\xe0\x41\x01\x12\x16\n\tfile_uris\x18\x05 \x03(\tB\x03\xe0\x41\x01\x12\x19\n\x0c\x61rchive_uris\x18\x06 \x03(\tB\x03\xe0\x41\x01\x12P\n\nproperties\x18\x07 \x03(\x0b\x32\x37.google.cloud.dataproc.v1beta2.SparkJob.PropertiesEntryB\x03\xe0\x41\x01\x12I\n\x0elogging_config\x18\x08 \x01(\x0b\x32,.google.cloud.dataproc.v1beta2.LoggingConfigB\x03\xe0\x41\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x08\n\x06\x64river"\x82\x03\n\nPySparkJob\x12!\n\x14main_python_file_uri\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x11\n\x04\x61rgs\x18\x02 \x03(\tB\x03\xe0\x41\x01\x12\x1d\n\x10python_file_uris\x18\x03 \x03(\tB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x04 \x03(\tB\x03\xe0\x41\x01\x12\x16\n\tfile_uris\x18\x05 \x03(\tB\x03\xe0\x41\x01\x12\x19\n\x0c\x61rchive_uris\x18\x06 \x03(\tB\x03\xe0\x41\x01\x12R\n\nproperties\x18\x07 \x03(\x0b\x32\x39.google.cloud.dataproc.v1beta2.PySparkJob.PropertiesEntryB\x03\xe0\x41\x01\x12I\n\x0elogging_config\x18\x08 \x01(\x0b\x32,.google.cloud.dataproc.v1beta2.LoggingConfigB\x03\xe0\x41\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"!\n\tQueryList\x12\x14\n\x07queries\x18\x01 \x03(\tB\x03\xe0\x41\x02"\xc4\x03\n\x07HiveJob\x12\x18\n\x0equery_file_uri\x18\x01 \x01(\tH\x00\x12>\n\nquery_list\x18\x02 \x01(\x0b\x32(.google.cloud.dataproc.v1beta2.QueryListH\x00\x12 \n\x13\x63ontinue_on_failure\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12Z\n\x10script_variables\x18\x04 \x03(\x0b\x32;.google.cloud.dataproc.v1beta2.HiveJob.ScriptVariablesEntryB\x03\xe0\x41\x01\x12O\n\nproperties\x18\x05 \x03(\x0b\x32\x36.google.cloud.dataproc.v1beta2.HiveJob.PropertiesEntryB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x06 \x03(\tB\x03\xe0\x41\x01\x1a\x36\n\x14ScriptVariablesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\t\n\x07queries"\xf9\x03\n\x0bSparkSqlJob\x12\x18\n\x0equery_file_uri\x18\x01 \x01(\tH\x00\x12>\n\nquery_list\x18\x02 \x01(\x0b\x32(.google.cloud.dataproc.v1beta2.QueryListH\x00\x12^\n\x10script_variables\x18\x03 \x03(\x0b\x32?.google.cloud.dataproc.v1beta2.SparkSqlJob.ScriptVariablesEntryB\x03\xe0\x41\x01\x12S\n\nproperties\x18\x04 \x03(\x0b\x32:.google.cloud.dataproc.v1beta2.SparkSqlJob.PropertiesEntryB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x38 \x03(\tB\x03\xe0\x41\x01\x12I\n\x0elogging_config\x18\x06 \x01(\x0b\x32,.google.cloud.dataproc.v1beta2.LoggingConfigB\x03\xe0\x41\x01\x1a\x36\n\x14ScriptVariablesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\t\n\x07queries"\x8c\x04\n\x06PigJob\x12\x18\n\x0equery_file_uri\x18\x01 \x01(\tH\x00\x12>\n\nquery_list\x18\x02 \x01(\x0b\x32(.google.cloud.dataproc.v1beta2.QueryListH\x00\x12 \n\x13\x63ontinue_on_failure\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12Y\n\x10script_variables\x18\x04 
\x03(\x0b\x32:.google.cloud.dataproc.v1beta2.PigJob.ScriptVariablesEntryB\x03\xe0\x41\x01\x12N\n\nproperties\x18\x05 \x03(\x0b\x32\x35.google.cloud.dataproc.v1beta2.PigJob.PropertiesEntryB\x03\xe0\x41\x01\x12\x1a\n\rjar_file_uris\x18\x06 \x03(\tB\x03\xe0\x41\x01\x12I\n\x0elogging_config\x18\x07 \x01(\x0b\x32,.google.cloud.dataproc.v1beta2.LoggingConfigB\x03\xe0\x41\x01\x1a\x36\n\x14ScriptVariablesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\t\n\x07queries"\xc0\x02\n\tSparkRJob\x12\x1c\n\x0fmain_r_file_uri\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x11\n\x04\x61rgs\x18\x02 \x03(\tB\x03\xe0\x41\x01\x12\x16\n\tfile_uris\x18\x03 \x03(\tB\x03\xe0\x41\x01\x12\x19\n\x0c\x61rchive_uris\x18\x04 \x03(\tB\x03\xe0\x41\x01\x12Q\n\nproperties\x18\x05 \x03(\x0b\x32\x38.google.cloud.dataproc.v1beta2.SparkRJob.PropertiesEntryB\x03\xe0\x41\x01\x12I\n\x0elogging_config\x18\x06 \x01(\x0b\x32,.google.cloud.dataproc.v1beta2.LoggingConfigB\x03\xe0\x41\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\x99\x03\n\tPrestoJob\x12\x18\n\x0equery_file_uri\x18\x01 \x01(\tH\x00\x12>\n\nquery_list\x18\x02 \x01(\x0b\x32(.google.cloud.dataproc.v1beta2.QueryListH\x00\x12 \n\x13\x63ontinue_on_failure\x18\x03 \x01(\x08\x42\x03\xe0\x41\x01\x12\x1a\n\routput_format\x18\x04 \x01(\tB\x03\xe0\x41\x01\x12\x18\n\x0b\x63lient_tags\x18\x05 \x03(\tB\x03\xe0\x41\x01\x12Q\n\nproperties\x18\x06 \x03(\x0b\x32\x38.google.cloud.dataproc.v1beta2.PrestoJob.PropertiesEntryB\x03\xe0\x41\x01\x12I\n\x0elogging_config\x18\x07 \x01(\x0b\x32,.google.cloud.dataproc.v1beta2.LoggingConfigB\x03\xe0\x41\x01\x1a\x31\n\x0fPropertiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\t\n\x07queries"D\n\x0cJobPlacement\x12\x19\n\x0c\x63luster_name\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x19\n\x0c\x63luster_uuid\x18\x02 \x01(\tB\x03\xe0\x41\x03"\xe0\x03\n\tJobStatus\x12\x42\n\x05state\x18\x01 \x01(\x0e\x32..google.cloud.dataproc.v1beta2.JobStatus.StateB\x03\xe0\x41\x03\x12\x14\n\x07\x64\x65tails\x18\x02 \x01(\tB\x03\xe0\x41\x03\x12\x39\n\x10state_start_time\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12H\n\x08substate\x18\x07 \x01(\x0e\x32\x31.google.cloud.dataproc.v1beta2.JobStatus.SubstateB\x03\xe0\x41\x03"\xa9\x01\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0b\n\x07PENDING\x10\x01\x12\x0e\n\nSETUP_DONE\x10\x08\x12\x0b\n\x07RUNNING\x10\x02\x12\x12\n\x0e\x43\x41NCEL_PENDING\x10\x03\x12\x12\n\x0e\x43\x41NCEL_STARTED\x10\x07\x12\r\n\tCANCELLED\x10\x04\x12\x08\n\x04\x44ONE\x10\x05\x12\t\n\x05\x45RROR\x10\x06\x12\x13\n\x0f\x41TTEMPT_FAILURE\x10\t"H\n\x08Substate\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\r\n\tSUBMITTED\x10\x01\x12\n\n\x06QUEUED\x10\x02\x12\x10\n\x0cSTALE_STATUS\x10\x03"<\n\x0cJobReference\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06job_id\x18\x02 \x01(\tB\x03\xe0\x41\x01"\xaa\x02\n\x0fYarnApplication\x12\x11\n\x04name\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12H\n\x05state\x18\x02 \x01(\x0e\x32\x34.google.cloud.dataproc.v1beta2.YarnApplication.StateB\x03\xe0\x41\x03\x12\x15\n\x08progress\x18\x03 \x01(\x02\x42\x03\xe0\x41\x03\x12\x19\n\x0ctracking_url\x18\x04 
\x01(\tB\x03\xe0\x41\x03"\x87\x01\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x07\n\x03NEW\x10\x01\x12\x0e\n\nNEW_SAVING\x10\x02\x12\r\n\tSUBMITTED\x10\x03\x12\x0c\n\x08\x41\x43\x43\x45PTED\x10\x04\x12\x0b\n\x07RUNNING\x10\x05\x12\x0c\n\x08\x46INISHED\x10\x06\x12\n\n\x06\x46\x41ILED\x10\x07\x12\n\n\x06KILLED\x10\x08"\xe5\t\n\x03Job\x12\x43\n\treference\x18\x01 \x01(\x0b\x32+.google.cloud.dataproc.v1beta2.JobReferenceB\x03\xe0\x41\x01\x12\x43\n\tplacement\x18\x02 \x01(\x0b\x32+.google.cloud.dataproc.v1beta2.JobPlacementB\x03\xe0\x41\x02\x12\x43\n\nhadoop_job\x18\x03 \x01(\x0b\x32(.google.cloud.dataproc.v1beta2.HadoopJobB\x03\xe0\x41\x01H\x00\x12\x41\n\tspark_job\x18\x04 \x01(\x0b\x32\'.google.cloud.dataproc.v1beta2.SparkJobB\x03\xe0\x41\x01H\x00\x12\x45\n\x0bpyspark_job\x18\x05 \x01(\x0b\x32).google.cloud.dataproc.v1beta2.PySparkJobB\x03\xe0\x41\x01H\x00\x12?\n\x08hive_job\x18\x06 \x01(\x0b\x32&.google.cloud.dataproc.v1beta2.HiveJobB\x03\xe0\x41\x01H\x00\x12=\n\x07pig_job\x18\x07 \x01(\x0b\x32%.google.cloud.dataproc.v1beta2.PigJobB\x03\xe0\x41\x01H\x00\x12\x44\n\x0bspark_r_job\x18\x15 \x01(\x0b\x32(.google.cloud.dataproc.v1beta2.SparkRJobB\x03\xe0\x41\x01H\x00\x12H\n\rspark_sql_job\x18\x0c \x01(\x0b\x32*.google.cloud.dataproc.v1beta2.SparkSqlJobB\x03\xe0\x41\x01H\x00\x12\x43\n\npresto_job\x18\x17 \x01(\x0b\x32(.google.cloud.dataproc.v1beta2.PrestoJobB\x03\xe0\x41\x01H\x00\x12=\n\x06status\x18\x08 \x01(\x0b\x32(.google.cloud.dataproc.v1beta2.JobStatusB\x03\xe0\x41\x03\x12\x45\n\x0estatus_history\x18\r \x03(\x0b\x32(.google.cloud.dataproc.v1beta2.JobStatusB\x03\xe0\x41\x03\x12N\n\x11yarn_applications\x18\t \x03(\x0b\x32..google.cloud.dataproc.v1beta2.YarnApplicationB\x03\xe0\x41\x03\x12\x19\n\x0csubmitted_by\x18\n \x01(\tB\x03\xe0\x41\x03\x12\'\n\x1a\x64river_output_resource_uri\x18\x11 \x01(\tB\x03\xe0\x41\x03\x12%\n\x18\x64river_control_files_uri\x18\x0f \x01(\tB\x03\xe0\x41\x03\x12\x43\n\x06labels\x18\x12 \x03(\x0b\x32..google.cloud.dataproc.v1beta2.Job.LabelsEntryB\x03\xe0\x41\x01\x12\x45\n\nscheduling\x18\x14 \x01(\x0b\x32,.google.cloud.dataproc.v1beta2.JobSchedulingB\x03\xe0\x41\x01\x12\x15\n\x08job_uuid\x18\x16 \x01(\tB\x03\xe0\x41\x03\x12\x11\n\x04\x64one\x18\x18 \x01(\x08\x42\x03\xe0\x41\x03\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\n\n\x08type_job"3\n\rJobScheduling\x12"\n\x15max_failures_per_hour\x18\x01 \x01(\x05\x42\x03\xe0\x41\x01"\xb3\x01\n\x0bJobMetadata\x12\x13\n\x06job_id\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12=\n\x06status\x18\x02 \x01(\x0b\x32(.google.cloud.dataproc.v1beta2.JobStatusB\x03\xe0\x41\x03\x12\x1b\n\x0eoperation_type\x18\x03 \x01(\tB\x03\xe0\x41\x03\x12\x33\n\nstart_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03"\x8f\x01\n\x10SubmitJobRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x34\n\x03job\x18\x02 \x01(\x0b\x32".google.cloud.dataproc.v1beta2.JobB\x03\xe0\x41\x02\x12\x17\n\nrequest_id\x18\x04 \x01(\tB\x03\xe0\x41\x01"R\n\rGetJobRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06job_id\x18\x02 \x01(\tB\x03\xe0\x41\x02"\xb8\x02\n\x0fListJobsRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x06 \x01(\tB\x03\xe0\x41\x02\x12\x16\n\tpage_size\x18\x02 \x01(\x05\x42\x03\xe0\x41\x01\x12\x17\n\npage_token\x18\x03 \x01(\tB\x03\xe0\x41\x01\x12\x19\n\x0c\x63luster_name\x18\x04 
\x01(\tB\x03\xe0\x41\x01\x12^\n\x11job_state_matcher\x18\x05 \x01(\x0e\x32>.google.cloud.dataproc.v1beta2.ListJobsRequest.JobStateMatcherB\x03\xe0\x41\x01\x12\x13\n\x06\x66ilter\x18\x07 \x01(\tB\x03\xe0\x41\x01"6\n\x0fJobStateMatcher\x12\x07\n\x03\x41LL\x10\x00\x12\n\n\x06\x41\x43TIVE\x10\x01\x12\x0e\n\nNON_ACTIVE\x10\x02"\xc1\x01\n\x10UpdateJobRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06job_id\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x34\n\x03job\x18\x04 \x01(\x0b\x32".google.cloud.dataproc.v1beta2.JobB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"g\n\x10ListJobsResponse\x12\x35\n\x04jobs\x18\x01 \x03(\x0b\x32".google.cloud.dataproc.v1beta2.JobB\x03\xe0\x41\x03\x12\x1c\n\x0fnext_page_token\x18\x02 \x01(\tB\x03\xe0\x41\x01"U\n\x10\x43\x61ncelJobRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06job_id\x18\x02 \x01(\tB\x03\xe0\x41\x02"U\n\x10\x44\x65leteJobRequest\x12\x17\n\nproject_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06region\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x13\n\x06job_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x32\xe7\x0b\n\rJobController\x12\xc2\x01\n\tSubmitJob\x12/.google.cloud.dataproc.v1beta2.SubmitJobRequest\x1a".google.cloud.dataproc.v1beta2.Job"`\x82\xd3\xe4\x93\x02@";/v1beta2/projects/{project_id}/regions/{region}/jobs:submit:\x01*\xda\x41\x17project_id, region, job\x12\xe9\x01\n\x14SubmitJobAsOperation\x12/.google.cloud.dataproc.v1beta2.SubmitJobRequest\x1a\x1d.google.longrunning.Operation"\x80\x01\x82\xd3\xe4\x93\x02K"F/v1beta2/projects/{project_id}/regions/{region}/jobs:submitAsOperation:\x01*\xda\x41\x17project_id, region, job\xca\x41\x12\n\x03Job\x12\x0bJobMetadata\x12\xbe\x01\n\x06GetJob\x12,.google.cloud.dataproc.v1beta2.GetJobRequest\x1a".google.cloud.dataproc.v1beta2.Job"b\x82\xd3\xe4\x93\x02?\x12=/v1beta2/projects/{project_id}/regions/{region}/jobs/{job_id}\xda\x41\x1aproject_id, region, job_id\x12\xdb\x01\n\x08ListJobs\x12..google.cloud.dataproc.v1beta2.ListJobsRequest\x1a/.google.cloud.dataproc.v1beta2.ListJobsResponse"n\x82\xd3\xe4\x93\x02\x36\x12\x34/v1beta2/projects/{project_id}/regions/{region}/jobs\xda\x41\x12project_id, region\xda\x41\x1aproject_id, region, filter\x12\xac\x01\n\tUpdateJob\x12/.google.cloud.dataproc.v1beta2.UpdateJobRequest\x1a".google.cloud.dataproc.v1beta2.Job"J\x82\xd3\xe4\x93\x02\x44\x32=/v1beta2/projects/{project_id}/regions/{region}/jobs/{job_id}:\x03job\x12\xce\x01\n\tCancelJob\x12/.google.cloud.dataproc.v1beta2.CancelJobRequest\x1a".google.cloud.dataproc.v1beta2.Job"l\x82\xd3\xe4\x93\x02I"D/v1beta2/projects/{project_id}/regions/{region}/jobs/{job_id}:cancel:\x01*\xda\x41\x1aproject_id, region, job_id\x12\xb8\x01\n\tDeleteJob\x12/.google.cloud.dataproc.v1beta2.DeleteJobRequest\x1a\x16.google.protobuf.Empty"b\x82\xd3\xe4\x93\x02?*=/v1beta2/projects/{project_id}/regions/{region}/jobs/{job_id}\xda\x41\x1aproject_id, region, job_id\x1aK\xca\x41\x17\x64\x61taproc.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformBw\n!com.google.cloud.dataproc.v1beta2B\tJobsProtoP\x01ZEgoogle.golang.org/genproto/googleapis/cloud/dataproc/v1beta2;dataprocb\x06proto3', - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, - google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, - 
google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, - google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - ], -) - - -_LOGGINGCONFIG_LEVEL = _descriptor.EnumDescriptor( - name="Level", - full_name="google.cloud.dataproc.v1beta2.LoggingConfig.Level", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="LEVEL_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="ALL", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="TRACE", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="DEBUG", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="INFO", - index=4, - number=4, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="WARN", - index=5, - number=5, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="ERROR", - index=6, - number=6, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="FATAL", - index=7, - number=7, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="OFF", - index=8, - number=8, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=522, - serialized_end=634, -) -_sym_db.RegisterEnumDescriptor(_LOGGINGCONFIG_LEVEL) - -_JOBSTATUS_STATE = _descriptor.EnumDescriptor( - name="State", - full_name="google.cloud.dataproc.v1beta2.JobStatus.State", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="STATE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="PENDING", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="SETUP_DONE", - index=2, - number=8, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="RUNNING", - index=3, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="CANCEL_PENDING", - index=4, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="CANCEL_STARTED", - index=5, - number=7, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="CANCELLED", - index=6, - number=4, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="DONE", - index=7, - number=5, - 
serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="ERROR", - index=8, - number=6, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="ATTEMPT_FAILURE", - index=9, - number=9, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=4355, - serialized_end=4524, -) -_sym_db.RegisterEnumDescriptor(_JOBSTATUS_STATE) - -_JOBSTATUS_SUBSTATE = _descriptor.EnumDescriptor( - name="Substate", - full_name="google.cloud.dataproc.v1beta2.JobStatus.Substate", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="SUBMITTED", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="QUEUED", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="STALE_STATUS", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=4526, - serialized_end=4598, -) -_sym_db.RegisterEnumDescriptor(_JOBSTATUS_SUBSTATE) - -_YARNAPPLICATION_STATE = _descriptor.EnumDescriptor( - name="State", - full_name="google.cloud.dataproc.v1beta2.YarnApplication.State", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="STATE_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="NEW", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="NEW_SAVING", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="SUBMITTED", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="ACCEPTED", - index=4, - number=4, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="RUNNING", - index=5, - number=5, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="FINISHED", - index=6, - number=6, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="FAILED", - index=7, - number=7, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="KILLED", - index=8, - number=8, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=4826, - serialized_end=4961, -) 
-_sym_db.RegisterEnumDescriptor(_YARNAPPLICATION_STATE) - -_LISTJOBSREQUEST_JOBSTATEMATCHER = _descriptor.EnumDescriptor( - name="JobStateMatcher", - full_name="google.cloud.dataproc.v1beta2.ListJobsRequest.JobStateMatcher", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="ALL", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="ACTIVE", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="NON_ACTIVE", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=6943, - serialized_end=6997, -) -_sym_db.RegisterEnumDescriptor(_LISTJOBSREQUEST_JOBSTATEMATCHER) - - -_LOGGINGCONFIG_DRIVERLOGLEVELSENTRY = _descriptor.Descriptor( - name="DriverLogLevelsEntry", - full_name="google.cloud.dataproc.v1beta2.LoggingConfig.DriverLogLevelsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1beta2.LoggingConfig.DriverLogLevelsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1beta2.LoggingConfig.DriverLogLevelsEntry.value", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=414, - serialized_end=520, -) - -_LOGGINGCONFIG = _descriptor.Descriptor( - name="LoggingConfig", - full_name="google.cloud.dataproc.v1beta2.LoggingConfig", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="driver_log_levels", - full_name="google.cloud.dataproc.v1beta2.LoggingConfig.driver_log_levels", - index=0, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_LOGGINGCONFIG_DRIVERLOGLEVELSENTRY,], - enum_types=[_LOGGINGCONFIG_LEVEL,], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=303, - serialized_end=634, -) - - -_HADOOPJOB_PROPERTIESENTRY = _descriptor.Descriptor( - name="PropertiesEntry", - full_name="google.cloud.dataproc.v1beta2.HadoopJob.PropertiesEntry", - filename=None, - 
file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1beta2.HadoopJob.PropertiesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1beta2.HadoopJob.PropertiesEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=957, - serialized_end=1006, -) - -_HADOOPJOB = _descriptor.Descriptor( - name="HadoopJob", - full_name="google.cloud.dataproc.v1beta2.HadoopJob", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="main_jar_file_uri", - full_name="google.cloud.dataproc.v1beta2.HadoopJob.main_jar_file_uri", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="main_class", - full_name="google.cloud.dataproc.v1beta2.HadoopJob.main_class", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="args", - full_name="google.cloud.dataproc.v1beta2.HadoopJob.args", - index=2, - number=3, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="jar_file_uris", - full_name="google.cloud.dataproc.v1beta2.HadoopJob.jar_file_uris", - index=3, - number=4, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="file_uris", - full_name="google.cloud.dataproc.v1beta2.HadoopJob.file_uris", - index=4, - number=5, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="archive_uris", - full_name="google.cloud.dataproc.v1beta2.HadoopJob.archive_uris", - index=5, - number=6, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="properties", - full_name="google.cloud.dataproc.v1beta2.HadoopJob.properties", - index=6, - number=7, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="logging_config", - full_name="google.cloud.dataproc.v1beta2.HadoopJob.logging_config", - index=7, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_HADOOPJOB_PROPERTIESENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="driver", - full_name="google.cloud.dataproc.v1beta2.HadoopJob.driver", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=637, - serialized_end=1016, -) - - -_SPARKJOB_PROPERTIESENTRY = _descriptor.Descriptor( - name="PropertiesEntry", - full_name="google.cloud.dataproc.v1beta2.SparkJob.PropertiesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1beta2.SparkJob.PropertiesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1beta2.SparkJob.PropertiesEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=957, - serialized_end=1006, -) - -_SPARKJOB = _descriptor.Descriptor( - name="SparkJob", - full_name="google.cloud.dataproc.v1beta2.SparkJob", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - 
_descriptor.FieldDescriptor( - name="main_jar_file_uri", - full_name="google.cloud.dataproc.v1beta2.SparkJob.main_jar_file_uri", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="main_class", - full_name="google.cloud.dataproc.v1beta2.SparkJob.main_class", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="args", - full_name="google.cloud.dataproc.v1beta2.SparkJob.args", - index=2, - number=3, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="jar_file_uris", - full_name="google.cloud.dataproc.v1beta2.SparkJob.jar_file_uris", - index=3, - number=4, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="file_uris", - full_name="google.cloud.dataproc.v1beta2.SparkJob.file_uris", - index=4, - number=5, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="archive_uris", - full_name="google.cloud.dataproc.v1beta2.SparkJob.archive_uris", - index=5, - number=6, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="properties", - full_name="google.cloud.dataproc.v1beta2.SparkJob.properties", - index=6, - number=7, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="logging_config", - full_name="google.cloud.dataproc.v1beta2.SparkJob.logging_config", - index=7, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - 
extensions=[], - nested_types=[_SPARKJOB_PROPERTIESENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="driver", - full_name="google.cloud.dataproc.v1beta2.SparkJob.driver", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=1019, - serialized_end=1396, -) - - -_PYSPARKJOB_PROPERTIESENTRY = _descriptor.Descriptor( - name="PropertiesEntry", - full_name="google.cloud.dataproc.v1beta2.PySparkJob.PropertiesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1beta2.PySparkJob.PropertiesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1beta2.PySparkJob.PropertiesEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=957, - serialized_end=1006, -) - -_PYSPARKJOB = _descriptor.Descriptor( - name="PySparkJob", - full_name="google.cloud.dataproc.v1beta2.PySparkJob", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="main_python_file_uri", - full_name="google.cloud.dataproc.v1beta2.PySparkJob.main_python_file_uri", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="args", - full_name="google.cloud.dataproc.v1beta2.PySparkJob.args", - index=1, - number=2, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="python_file_uris", - full_name="google.cloud.dataproc.v1beta2.PySparkJob.python_file_uris", - index=2, - number=3, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="jar_file_uris", - 
full_name="google.cloud.dataproc.v1beta2.PySparkJob.jar_file_uris", - index=3, - number=4, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="file_uris", - full_name="google.cloud.dataproc.v1beta2.PySparkJob.file_uris", - index=4, - number=5, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="archive_uris", - full_name="google.cloud.dataproc.v1beta2.PySparkJob.archive_uris", - index=5, - number=6, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="properties", - full_name="google.cloud.dataproc.v1beta2.PySparkJob.properties", - index=6, - number=7, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="logging_config", - full_name="google.cloud.dataproc.v1beta2.PySparkJob.logging_config", - index=7, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_PYSPARKJOB_PROPERTIESENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1399, - serialized_end=1785, -) - - -_QUERYLIST = _descriptor.Descriptor( - name="QueryList", - full_name="google.cloud.dataproc.v1beta2.QueryList", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="queries", - full_name="google.cloud.dataproc.v1beta2.QueryList.queries", - index=0, - number=1, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1787, - serialized_end=1820, -) - - -_HIVEJOB_SCRIPTVARIABLESENTRY = _descriptor.Descriptor( - name="ScriptVariablesEntry", - full_name="google.cloud.dataproc.v1beta2.HiveJob.ScriptVariablesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - 
create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1beta2.HiveJob.ScriptVariablesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1beta2.HiveJob.ScriptVariablesEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2159, - serialized_end=2213, -) - -_HIVEJOB_PROPERTIESENTRY = _descriptor.Descriptor( - name="PropertiesEntry", - full_name="google.cloud.dataproc.v1beta2.HiveJob.PropertiesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1beta2.HiveJob.PropertiesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1beta2.HiveJob.PropertiesEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=957, - serialized_end=1006, -) - -_HIVEJOB = _descriptor.Descriptor( - name="HiveJob", - full_name="google.cloud.dataproc.v1beta2.HiveJob", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="query_file_uri", - full_name="google.cloud.dataproc.v1beta2.HiveJob.query_file_uri", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="query_list", - full_name="google.cloud.dataproc.v1beta2.HiveJob.query_list", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, 
- extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="continue_on_failure", - full_name="google.cloud.dataproc.v1beta2.HiveJob.continue_on_failure", - index=2, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="script_variables", - full_name="google.cloud.dataproc.v1beta2.HiveJob.script_variables", - index=3, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="properties", - full_name="google.cloud.dataproc.v1beta2.HiveJob.properties", - index=4, - number=5, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="jar_file_uris", - full_name="google.cloud.dataproc.v1beta2.HiveJob.jar_file_uris", - index=5, - number=6, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_HIVEJOB_SCRIPTVARIABLESENTRY, _HIVEJOB_PROPERTIESENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="queries", - full_name="google.cloud.dataproc.v1beta2.HiveJob.queries", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=1823, - serialized_end=2275, -) - - -_SPARKSQLJOB_SCRIPTVARIABLESENTRY = _descriptor.Descriptor( - name="ScriptVariablesEntry", - full_name="google.cloud.dataproc.v1beta2.SparkSqlJob.ScriptVariablesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1beta2.SparkSqlJob.ScriptVariablesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1beta2.SparkSqlJob.ScriptVariablesEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - 
create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2159, - serialized_end=2213, -) - -_SPARKSQLJOB_PROPERTIESENTRY = _descriptor.Descriptor( - name="PropertiesEntry", - full_name="google.cloud.dataproc.v1beta2.SparkSqlJob.PropertiesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1beta2.SparkSqlJob.PropertiesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1beta2.SparkSqlJob.PropertiesEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=957, - serialized_end=1006, -) - -_SPARKSQLJOB = _descriptor.Descriptor( - name="SparkSqlJob", - full_name="google.cloud.dataproc.v1beta2.SparkSqlJob", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="query_file_uri", - full_name="google.cloud.dataproc.v1beta2.SparkSqlJob.query_file_uri", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="query_list", - full_name="google.cloud.dataproc.v1beta2.SparkSqlJob.query_list", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="script_variables", - full_name="google.cloud.dataproc.v1beta2.SparkSqlJob.script_variables", - index=2, - number=3, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="properties", - full_name="google.cloud.dataproc.v1beta2.SparkSqlJob.properties", - index=3, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="jar_file_uris", - full_name="google.cloud.dataproc.v1beta2.SparkSqlJob.jar_file_uris", - index=4, - number=56, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="logging_config", - full_name="google.cloud.dataproc.v1beta2.SparkSqlJob.logging_config", - index=5, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_SPARKSQLJOB_SCRIPTVARIABLESENTRY, _SPARKSQLJOB_PROPERTIESENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="queries", - full_name="google.cloud.dataproc.v1beta2.SparkSqlJob.queries", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=2278, - serialized_end=2783, -) - - -_PIGJOB_SCRIPTVARIABLESENTRY = _descriptor.Descriptor( - name="ScriptVariablesEntry", - full_name="google.cloud.dataproc.v1beta2.PigJob.ScriptVariablesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1beta2.PigJob.ScriptVariablesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1beta2.PigJob.ScriptVariablesEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2159, - serialized_end=2213, -) - -_PIGJOB_PROPERTIESENTRY = _descriptor.Descriptor( - name="PropertiesEntry", - full_name="google.cloud.dataproc.v1beta2.PigJob.PropertiesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1beta2.PigJob.PropertiesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1beta2.PigJob.PropertiesEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=957, - serialized_end=1006, -) - -_PIGJOB = _descriptor.Descriptor( - name="PigJob", - full_name="google.cloud.dataproc.v1beta2.PigJob", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="query_file_uri", - full_name="google.cloud.dataproc.v1beta2.PigJob.query_file_uri", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="query_list", - full_name="google.cloud.dataproc.v1beta2.PigJob.query_list", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="continue_on_failure", - full_name="google.cloud.dataproc.v1beta2.PigJob.continue_on_failure", - index=2, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="script_variables", - full_name="google.cloud.dataproc.v1beta2.PigJob.script_variables", - index=3, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="properties", - full_name="google.cloud.dataproc.v1beta2.PigJob.properties", - index=4, - number=5, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="jar_file_uris", - full_name="google.cloud.dataproc.v1beta2.PigJob.jar_file_uris", - index=5, - number=6, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="logging_config", - full_name="google.cloud.dataproc.v1beta2.PigJob.logging_config", - index=6, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_PIGJOB_SCRIPTVARIABLESENTRY, _PIGJOB_PROPERTIESENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="queries", - full_name="google.cloud.dataproc.v1beta2.PigJob.queries", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=2786, - serialized_end=3310, -) - - -_SPARKRJOB_PROPERTIESENTRY = _descriptor.Descriptor( - name="PropertiesEntry", - full_name="google.cloud.dataproc.v1beta2.SparkRJob.PropertiesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1beta2.SparkRJob.PropertiesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1beta2.SparkRJob.PropertiesEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=957, - serialized_end=1006, -) - -_SPARKRJOB = _descriptor.Descriptor( - name="SparkRJob", - full_name="google.cloud.dataproc.v1beta2.SparkRJob", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="main_r_file_uri", - full_name="google.cloud.dataproc.v1beta2.SparkRJob.main_r_file_uri", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="args", - full_name="google.cloud.dataproc.v1beta2.SparkRJob.args", - index=1, - number=2, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - 
create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="file_uris", - full_name="google.cloud.dataproc.v1beta2.SparkRJob.file_uris", - index=2, - number=3, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="archive_uris", - full_name="google.cloud.dataproc.v1beta2.SparkRJob.archive_uris", - index=3, - number=4, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="properties", - full_name="google.cloud.dataproc.v1beta2.SparkRJob.properties", - index=4, - number=5, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="logging_config", - full_name="google.cloud.dataproc.v1beta2.SparkRJob.logging_config", - index=5, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_SPARKRJOB_PROPERTIESENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3313, - serialized_end=3633, -) - - -_PRESTOJOB_PROPERTIESENTRY = _descriptor.Descriptor( - name="PropertiesEntry", - full_name="google.cloud.dataproc.v1beta2.PrestoJob.PropertiesEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1beta2.PrestoJob.PropertiesEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1beta2.PrestoJob.PropertiesEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=957, - serialized_end=1006, -) - -_PRESTOJOB = _descriptor.Descriptor( - name="PrestoJob", - 
full_name="google.cloud.dataproc.v1beta2.PrestoJob", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="query_file_uri", - full_name="google.cloud.dataproc.v1beta2.PrestoJob.query_file_uri", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="query_list", - full_name="google.cloud.dataproc.v1beta2.PrestoJob.query_list", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="continue_on_failure", - full_name="google.cloud.dataproc.v1beta2.PrestoJob.continue_on_failure", - index=2, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="output_format", - full_name="google.cloud.dataproc.v1beta2.PrestoJob.output_format", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="client_tags", - full_name="google.cloud.dataproc.v1beta2.PrestoJob.client_tags", - index=4, - number=5, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="properties", - full_name="google.cloud.dataproc.v1beta2.PrestoJob.properties", - index=5, - number=6, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="logging_config", - full_name="google.cloud.dataproc.v1beta2.PrestoJob.logging_config", - index=6, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_PRESTOJOB_PROPERTIESENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="queries", - 
full_name="google.cloud.dataproc.v1beta2.PrestoJob.queries", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=3636, - serialized_end=4045, -) - - -_JOBPLACEMENT = _descriptor.Descriptor( - name="JobPlacement", - full_name="google.cloud.dataproc.v1beta2.JobPlacement", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="cluster_name", - full_name="google.cloud.dataproc.v1beta2.JobPlacement.cluster_name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_uuid", - full_name="google.cloud.dataproc.v1beta2.JobPlacement.cluster_uuid", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4047, - serialized_end=4115, -) - - -_JOBSTATUS = _descriptor.Descriptor( - name="JobStatus", - full_name="google.cloud.dataproc.v1beta2.JobStatus", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="state", - full_name="google.cloud.dataproc.v1beta2.JobStatus.state", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="details", - full_name="google.cloud.dataproc.v1beta2.JobStatus.details", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="state_start_time", - full_name="google.cloud.dataproc.v1beta2.JobStatus.state_start_time", - index=2, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="substate", - full_name="google.cloud.dataproc.v1beta2.JobStatus.substate", - index=3, - number=7, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - 
create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_JOBSTATUS_STATE, _JOBSTATUS_SUBSTATE,], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4118, - serialized_end=4598, -) - - -_JOBREFERENCE = _descriptor.Descriptor( - name="JobReference", - full_name="google.cloud.dataproc.v1beta2.JobReference", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="project_id", - full_name="google.cloud.dataproc.v1beta2.JobReference.project_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="job_id", - full_name="google.cloud.dataproc.v1beta2.JobReference.job_id", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4600, - serialized_end=4660, -) - - -_YARNAPPLICATION = _descriptor.Descriptor( - name="YarnApplication", - full_name="google.cloud.dataproc.v1beta2.YarnApplication", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.cloud.dataproc.v1beta2.YarnApplication.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="state", - full_name="google.cloud.dataproc.v1beta2.YarnApplication.state", - index=1, - number=2, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="progress", - full_name="google.cloud.dataproc.v1beta2.YarnApplication.progress", - index=2, - number=3, - type=2, - cpp_type=6, - label=1, - has_default_value=False, - default_value=float(0), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="tracking_url", - full_name="google.cloud.dataproc.v1beta2.YarnApplication.tracking_url", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - 
containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_YARNAPPLICATION_STATE,], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4663, - serialized_end=4961, -) - - -_JOB_LABELSENTRY = _descriptor.Descriptor( - name="LabelsEntry", - full_name="google.cloud.dataproc.v1beta2.Job.LabelsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1beta2.Job.LabelsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1beta2.Job.LabelsEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=6160, - serialized_end=6205, -) - -_JOB = _descriptor.Descriptor( - name="Job", - full_name="google.cloud.dataproc.v1beta2.Job", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="reference", - full_name="google.cloud.dataproc.v1beta2.Job.reference", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="placement", - full_name="google.cloud.dataproc.v1beta2.Job.placement", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="hadoop_job", - full_name="google.cloud.dataproc.v1beta2.Job.hadoop_job", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="spark_job", - full_name="google.cloud.dataproc.v1beta2.Job.spark_job", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - 
containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="pyspark_job", - full_name="google.cloud.dataproc.v1beta2.Job.pyspark_job", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="hive_job", - full_name="google.cloud.dataproc.v1beta2.Job.hive_job", - index=5, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="pig_job", - full_name="google.cloud.dataproc.v1beta2.Job.pig_job", - index=6, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="spark_r_job", - full_name="google.cloud.dataproc.v1beta2.Job.spark_r_job", - index=7, - number=21, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="spark_sql_job", - full_name="google.cloud.dataproc.v1beta2.Job.spark_sql_job", - index=8, - number=12, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="presto_job", - full_name="google.cloud.dataproc.v1beta2.Job.presto_job", - index=9, - number=23, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="status", - full_name="google.cloud.dataproc.v1beta2.Job.status", - index=10, - number=8, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="status_history", - full_name="google.cloud.dataproc.v1beta2.Job.status_history", - index=11, - number=13, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - 
extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="yarn_applications", - full_name="google.cloud.dataproc.v1beta2.Job.yarn_applications", - index=12, - number=9, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="submitted_by", - full_name="google.cloud.dataproc.v1beta2.Job.submitted_by", - index=13, - number=10, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="driver_output_resource_uri", - full_name="google.cloud.dataproc.v1beta2.Job.driver_output_resource_uri", - index=14, - number=17, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="driver_control_files_uri", - full_name="google.cloud.dataproc.v1beta2.Job.driver_control_files_uri", - index=15, - number=15, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="labels", - full_name="google.cloud.dataproc.v1beta2.Job.labels", - index=16, - number=18, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="scheduling", - full_name="google.cloud.dataproc.v1beta2.Job.scheduling", - index=17, - number=20, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="job_uuid", - full_name="google.cloud.dataproc.v1beta2.Job.job_uuid", - index=18, - number=22, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="done", - full_name="google.cloud.dataproc.v1beta2.Job.done", - index=19, - number=24, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - 
enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_JOB_LABELSENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="type_job", - full_name="google.cloud.dataproc.v1beta2.Job.type_job", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=4964, - serialized_end=6217, -) - - -_JOBSCHEDULING = _descriptor.Descriptor( - name="JobScheduling", - full_name="google.cloud.dataproc.v1beta2.JobScheduling", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="max_failures_per_hour", - full_name="google.cloud.dataproc.v1beta2.JobScheduling.max_failures_per_hour", - index=0, - number=1, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=6219, - serialized_end=6270, -) - - -_JOBMETADATA = _descriptor.Descriptor( - name="JobMetadata", - full_name="google.cloud.dataproc.v1beta2.JobMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="job_id", - full_name="google.cloud.dataproc.v1beta2.JobMetadata.job_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="status", - full_name="google.cloud.dataproc.v1beta2.JobMetadata.status", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="operation_type", - full_name="google.cloud.dataproc.v1beta2.JobMetadata.operation_type", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_time", - full_name="google.cloud.dataproc.v1beta2.JobMetadata.start_time", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - 
create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=6273, - serialized_end=6452, -) - - -_SUBMITJOBREQUEST = _descriptor.Descriptor( - name="SubmitJobRequest", - full_name="google.cloud.dataproc.v1beta2.SubmitJobRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="project_id", - full_name="google.cloud.dataproc.v1beta2.SubmitJobRequest.project_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="region", - full_name="google.cloud.dataproc.v1beta2.SubmitJobRequest.region", - index=1, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="job", - full_name="google.cloud.dataproc.v1beta2.SubmitJobRequest.job", - index=2, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="request_id", - full_name="google.cloud.dataproc.v1beta2.SubmitJobRequest.request_id", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=6455, - serialized_end=6598, -) - - -_GETJOBREQUEST = _descriptor.Descriptor( - name="GetJobRequest", - full_name="google.cloud.dataproc.v1beta2.GetJobRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="project_id", - full_name="google.cloud.dataproc.v1beta2.GetJobRequest.project_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="region", - full_name="google.cloud.dataproc.v1beta2.GetJobRequest.region", - index=1, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="job_id", - full_name="google.cloud.dataproc.v1beta2.GetJobRequest.job_id", - index=2, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=6600, - serialized_end=6682, -) - - -_LISTJOBSREQUEST = _descriptor.Descriptor( - name="ListJobsRequest", - full_name="google.cloud.dataproc.v1beta2.ListJobsRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="project_id", - full_name="google.cloud.dataproc.v1beta2.ListJobsRequest.project_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="region", - full_name="google.cloud.dataproc.v1beta2.ListJobsRequest.region", - index=1, - number=6, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.cloud.dataproc.v1beta2.ListJobsRequest.page_size", - index=2, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.cloud.dataproc.v1beta2.ListJobsRequest.page_token", - index=3, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_name", - full_name="google.cloud.dataproc.v1beta2.ListJobsRequest.cluster_name", - index=4, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="job_state_matcher", - full_name="google.cloud.dataproc.v1beta2.ListJobsRequest.job_state_matcher", - index=5, - number=5, - type=14, - cpp_type=8, - label=1, - 
has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="filter", - full_name="google.cloud.dataproc.v1beta2.ListJobsRequest.filter", - index=6, - number=7, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_LISTJOBSREQUEST_JOBSTATEMATCHER,], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=6685, - serialized_end=6997, -) - - -_UPDATEJOBREQUEST = _descriptor.Descriptor( - name="UpdateJobRequest", - full_name="google.cloud.dataproc.v1beta2.UpdateJobRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="project_id", - full_name="google.cloud.dataproc.v1beta2.UpdateJobRequest.project_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="region", - full_name="google.cloud.dataproc.v1beta2.UpdateJobRequest.region", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="job_id", - full_name="google.cloud.dataproc.v1beta2.UpdateJobRequest.job_id", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="job", - full_name="google.cloud.dataproc.v1beta2.UpdateJobRequest.job", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="update_mask", - full_name="google.cloud.dataproc.v1beta2.UpdateJobRequest.update_mask", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - 
serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=7000, - serialized_end=7193, -) - - -_LISTJOBSRESPONSE = _descriptor.Descriptor( - name="ListJobsResponse", - full_name="google.cloud.dataproc.v1beta2.ListJobsResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="jobs", - full_name="google.cloud.dataproc.v1beta2.ListJobsResponse.jobs", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.cloud.dataproc.v1beta2.ListJobsResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=7195, - serialized_end=7298, -) - - -_CANCELJOBREQUEST = _descriptor.Descriptor( - name="CancelJobRequest", - full_name="google.cloud.dataproc.v1beta2.CancelJobRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="project_id", - full_name="google.cloud.dataproc.v1beta2.CancelJobRequest.project_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="region", - full_name="google.cloud.dataproc.v1beta2.CancelJobRequest.region", - index=1, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="job_id", - full_name="google.cloud.dataproc.v1beta2.CancelJobRequest.job_id", - index=2, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=7300, - serialized_end=7385, -) - - -_DELETEJOBREQUEST = _descriptor.Descriptor( - name="DeleteJobRequest", - full_name="google.cloud.dataproc.v1beta2.DeleteJobRequest", - filename=None, - file=DESCRIPTOR, - 
containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="project_id", - full_name="google.cloud.dataproc.v1beta2.DeleteJobRequest.project_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="region", - full_name="google.cloud.dataproc.v1beta2.DeleteJobRequest.region", - index=1, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="job_id", - full_name="google.cloud.dataproc.v1beta2.DeleteJobRequest.job_id", - index=2, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=7387, - serialized_end=7472, -) - -_LOGGINGCONFIG_DRIVERLOGLEVELSENTRY.fields_by_name[ - "value" -].enum_type = _LOGGINGCONFIG_LEVEL -_LOGGINGCONFIG_DRIVERLOGLEVELSENTRY.containing_type = _LOGGINGCONFIG -_LOGGINGCONFIG.fields_by_name[ - "driver_log_levels" -].message_type = _LOGGINGCONFIG_DRIVERLOGLEVELSENTRY -_LOGGINGCONFIG_LEVEL.containing_type = _LOGGINGCONFIG -_HADOOPJOB_PROPERTIESENTRY.containing_type = _HADOOPJOB -_HADOOPJOB.fields_by_name["properties"].message_type = _HADOOPJOB_PROPERTIESENTRY -_HADOOPJOB.fields_by_name["logging_config"].message_type = _LOGGINGCONFIG -_HADOOPJOB.oneofs_by_name["driver"].fields.append( - _HADOOPJOB.fields_by_name["main_jar_file_uri"] -) -_HADOOPJOB.fields_by_name[ - "main_jar_file_uri" -].containing_oneof = _HADOOPJOB.oneofs_by_name["driver"] -_HADOOPJOB.oneofs_by_name["driver"].fields.append( - _HADOOPJOB.fields_by_name["main_class"] -) -_HADOOPJOB.fields_by_name["main_class"].containing_oneof = _HADOOPJOB.oneofs_by_name[ - "driver" -] -_SPARKJOB_PROPERTIESENTRY.containing_type = _SPARKJOB -_SPARKJOB.fields_by_name["properties"].message_type = _SPARKJOB_PROPERTIESENTRY -_SPARKJOB.fields_by_name["logging_config"].message_type = _LOGGINGCONFIG -_SPARKJOB.oneofs_by_name["driver"].fields.append( - _SPARKJOB.fields_by_name["main_jar_file_uri"] -) -_SPARKJOB.fields_by_name[ - "main_jar_file_uri" -].containing_oneof = _SPARKJOB.oneofs_by_name["driver"] -_SPARKJOB.oneofs_by_name["driver"].fields.append(_SPARKJOB.fields_by_name["main_class"]) -_SPARKJOB.fields_by_name["main_class"].containing_oneof = _SPARKJOB.oneofs_by_name[ - "driver" -] -_PYSPARKJOB_PROPERTIESENTRY.containing_type = _PYSPARKJOB -_PYSPARKJOB.fields_by_name["properties"].message_type = _PYSPARKJOB_PROPERTIESENTRY -_PYSPARKJOB.fields_by_name["logging_config"].message_type = _LOGGINGCONFIG -_HIVEJOB_SCRIPTVARIABLESENTRY.containing_type = _HIVEJOB -_HIVEJOB_PROPERTIESENTRY.containing_type = 
_HIVEJOB -_HIVEJOB.fields_by_name["query_list"].message_type = _QUERYLIST -_HIVEJOB.fields_by_name["script_variables"].message_type = _HIVEJOB_SCRIPTVARIABLESENTRY -_HIVEJOB.fields_by_name["properties"].message_type = _HIVEJOB_PROPERTIESENTRY -_HIVEJOB.oneofs_by_name["queries"].fields.append( - _HIVEJOB.fields_by_name["query_file_uri"] -) -_HIVEJOB.fields_by_name["query_file_uri"].containing_oneof = _HIVEJOB.oneofs_by_name[ - "queries" -] -_HIVEJOB.oneofs_by_name["queries"].fields.append(_HIVEJOB.fields_by_name["query_list"]) -_HIVEJOB.fields_by_name["query_list"].containing_oneof = _HIVEJOB.oneofs_by_name[ - "queries" -] -_SPARKSQLJOB_SCRIPTVARIABLESENTRY.containing_type = _SPARKSQLJOB -_SPARKSQLJOB_PROPERTIESENTRY.containing_type = _SPARKSQLJOB -_SPARKSQLJOB.fields_by_name["query_list"].message_type = _QUERYLIST -_SPARKSQLJOB.fields_by_name[ - "script_variables" -].message_type = _SPARKSQLJOB_SCRIPTVARIABLESENTRY -_SPARKSQLJOB.fields_by_name["properties"].message_type = _SPARKSQLJOB_PROPERTIESENTRY -_SPARKSQLJOB.fields_by_name["logging_config"].message_type = _LOGGINGCONFIG -_SPARKSQLJOB.oneofs_by_name["queries"].fields.append( - _SPARKSQLJOB.fields_by_name["query_file_uri"] -) -_SPARKSQLJOB.fields_by_name[ - "query_file_uri" -].containing_oneof = _SPARKSQLJOB.oneofs_by_name["queries"] -_SPARKSQLJOB.oneofs_by_name["queries"].fields.append( - _SPARKSQLJOB.fields_by_name["query_list"] -) -_SPARKSQLJOB.fields_by_name[ - "query_list" -].containing_oneof = _SPARKSQLJOB.oneofs_by_name["queries"] -_PIGJOB_SCRIPTVARIABLESENTRY.containing_type = _PIGJOB -_PIGJOB_PROPERTIESENTRY.containing_type = _PIGJOB -_PIGJOB.fields_by_name["query_list"].message_type = _QUERYLIST -_PIGJOB.fields_by_name["script_variables"].message_type = _PIGJOB_SCRIPTVARIABLESENTRY -_PIGJOB.fields_by_name["properties"].message_type = _PIGJOB_PROPERTIESENTRY -_PIGJOB.fields_by_name["logging_config"].message_type = _LOGGINGCONFIG -_PIGJOB.oneofs_by_name["queries"].fields.append( - _PIGJOB.fields_by_name["query_file_uri"] -) -_PIGJOB.fields_by_name["query_file_uri"].containing_oneof = _PIGJOB.oneofs_by_name[ - "queries" -] -_PIGJOB.oneofs_by_name["queries"].fields.append(_PIGJOB.fields_by_name["query_list"]) -_PIGJOB.fields_by_name["query_list"].containing_oneof = _PIGJOB.oneofs_by_name[ - "queries" -] -_SPARKRJOB_PROPERTIESENTRY.containing_type = _SPARKRJOB -_SPARKRJOB.fields_by_name["properties"].message_type = _SPARKRJOB_PROPERTIESENTRY -_SPARKRJOB.fields_by_name["logging_config"].message_type = _LOGGINGCONFIG -_PRESTOJOB_PROPERTIESENTRY.containing_type = _PRESTOJOB -_PRESTOJOB.fields_by_name["query_list"].message_type = _QUERYLIST -_PRESTOJOB.fields_by_name["properties"].message_type = _PRESTOJOB_PROPERTIESENTRY -_PRESTOJOB.fields_by_name["logging_config"].message_type = _LOGGINGCONFIG -_PRESTOJOB.oneofs_by_name["queries"].fields.append( - _PRESTOJOB.fields_by_name["query_file_uri"] -) -_PRESTOJOB.fields_by_name[ - "query_file_uri" -].containing_oneof = _PRESTOJOB.oneofs_by_name["queries"] -_PRESTOJOB.oneofs_by_name["queries"].fields.append( - _PRESTOJOB.fields_by_name["query_list"] -) -_PRESTOJOB.fields_by_name["query_list"].containing_oneof = _PRESTOJOB.oneofs_by_name[ - "queries" -] -_JOBSTATUS.fields_by_name["state"].enum_type = _JOBSTATUS_STATE -_JOBSTATUS.fields_by_name[ - "state_start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_JOBSTATUS.fields_by_name["substate"].enum_type = _JOBSTATUS_SUBSTATE -_JOBSTATUS_STATE.containing_type = _JOBSTATUS -_JOBSTATUS_SUBSTATE.containing_type = 
_JOBSTATUS -_YARNAPPLICATION.fields_by_name["state"].enum_type = _YARNAPPLICATION_STATE -_YARNAPPLICATION_STATE.containing_type = _YARNAPPLICATION -_JOB_LABELSENTRY.containing_type = _JOB -_JOB.fields_by_name["reference"].message_type = _JOBREFERENCE -_JOB.fields_by_name["placement"].message_type = _JOBPLACEMENT -_JOB.fields_by_name["hadoop_job"].message_type = _HADOOPJOB -_JOB.fields_by_name["spark_job"].message_type = _SPARKJOB -_JOB.fields_by_name["pyspark_job"].message_type = _PYSPARKJOB -_JOB.fields_by_name["hive_job"].message_type = _HIVEJOB -_JOB.fields_by_name["pig_job"].message_type = _PIGJOB -_JOB.fields_by_name["spark_r_job"].message_type = _SPARKRJOB -_JOB.fields_by_name["spark_sql_job"].message_type = _SPARKSQLJOB -_JOB.fields_by_name["presto_job"].message_type = _PRESTOJOB -_JOB.fields_by_name["status"].message_type = _JOBSTATUS -_JOB.fields_by_name["status_history"].message_type = _JOBSTATUS -_JOB.fields_by_name["yarn_applications"].message_type = _YARNAPPLICATION -_JOB.fields_by_name["labels"].message_type = _JOB_LABELSENTRY -_JOB.fields_by_name["scheduling"].message_type = _JOBSCHEDULING -_JOB.oneofs_by_name["type_job"].fields.append(_JOB.fields_by_name["hadoop_job"]) -_JOB.fields_by_name["hadoop_job"].containing_oneof = _JOB.oneofs_by_name["type_job"] -_JOB.oneofs_by_name["type_job"].fields.append(_JOB.fields_by_name["spark_job"]) -_JOB.fields_by_name["spark_job"].containing_oneof = _JOB.oneofs_by_name["type_job"] -_JOB.oneofs_by_name["type_job"].fields.append(_JOB.fields_by_name["pyspark_job"]) -_JOB.fields_by_name["pyspark_job"].containing_oneof = _JOB.oneofs_by_name["type_job"] -_JOB.oneofs_by_name["type_job"].fields.append(_JOB.fields_by_name["hive_job"]) -_JOB.fields_by_name["hive_job"].containing_oneof = _JOB.oneofs_by_name["type_job"] -_JOB.oneofs_by_name["type_job"].fields.append(_JOB.fields_by_name["pig_job"]) -_JOB.fields_by_name["pig_job"].containing_oneof = _JOB.oneofs_by_name["type_job"] -_JOB.oneofs_by_name["type_job"].fields.append(_JOB.fields_by_name["spark_r_job"]) -_JOB.fields_by_name["spark_r_job"].containing_oneof = _JOB.oneofs_by_name["type_job"] -_JOB.oneofs_by_name["type_job"].fields.append(_JOB.fields_by_name["spark_sql_job"]) -_JOB.fields_by_name["spark_sql_job"].containing_oneof = _JOB.oneofs_by_name["type_job"] -_JOB.oneofs_by_name["type_job"].fields.append(_JOB.fields_by_name["presto_job"]) -_JOB.fields_by_name["presto_job"].containing_oneof = _JOB.oneofs_by_name["type_job"] -_JOBMETADATA.fields_by_name["status"].message_type = _JOBSTATUS -_JOBMETADATA.fields_by_name[ - "start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_SUBMITJOBREQUEST.fields_by_name["job"].message_type = _JOB -_LISTJOBSREQUEST.fields_by_name[ - "job_state_matcher" -].enum_type = _LISTJOBSREQUEST_JOBSTATEMATCHER -_LISTJOBSREQUEST_JOBSTATEMATCHER.containing_type = _LISTJOBSREQUEST -_UPDATEJOBREQUEST.fields_by_name["job"].message_type = _JOB -_UPDATEJOBREQUEST.fields_by_name[ - "update_mask" -].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK -_LISTJOBSRESPONSE.fields_by_name["jobs"].message_type = _JOB -DESCRIPTOR.message_types_by_name["LoggingConfig"] = _LOGGINGCONFIG -DESCRIPTOR.message_types_by_name["HadoopJob"] = _HADOOPJOB -DESCRIPTOR.message_types_by_name["SparkJob"] = _SPARKJOB -DESCRIPTOR.message_types_by_name["PySparkJob"] = _PYSPARKJOB -DESCRIPTOR.message_types_by_name["QueryList"] = _QUERYLIST -DESCRIPTOR.message_types_by_name["HiveJob"] = _HIVEJOB -DESCRIPTOR.message_types_by_name["SparkSqlJob"] = _SPARKSQLJOB 
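# A minimal usage sketch (assuming the pre-migration
# google.cloud.dataproc_v1beta2.proto.jobs_pb2 module shown in this file): the
# oneof wiring above makes the Job "type_job" driver fields mutually exclusive,
# so writing one driver field clears whichever one was set before.
from google.cloud.dataproc_v1beta2.proto import jobs_pb2

job = jobs_pb2.Job()
job.hadoop_job.main_class = "org.apache.hadoop.examples.WordCount"  # example value
assert job.WhichOneof("type_job") == "hadoop_job"

# Setting a different driver replaces hadoop_job within the oneof.
job.spark_job.main_jar_file_uri = "gs://example-bucket/spark-app.jar"  # example value
assert job.WhichOneof("type_job") == "spark_job"
assert not job.HasField("hadoop_job")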
-DESCRIPTOR.message_types_by_name["PigJob"] = _PIGJOB -DESCRIPTOR.message_types_by_name["SparkRJob"] = _SPARKRJOB -DESCRIPTOR.message_types_by_name["PrestoJob"] = _PRESTOJOB -DESCRIPTOR.message_types_by_name["JobPlacement"] = _JOBPLACEMENT -DESCRIPTOR.message_types_by_name["JobStatus"] = _JOBSTATUS -DESCRIPTOR.message_types_by_name["JobReference"] = _JOBREFERENCE -DESCRIPTOR.message_types_by_name["YarnApplication"] = _YARNAPPLICATION -DESCRIPTOR.message_types_by_name["Job"] = _JOB -DESCRIPTOR.message_types_by_name["JobScheduling"] = _JOBSCHEDULING -DESCRIPTOR.message_types_by_name["JobMetadata"] = _JOBMETADATA -DESCRIPTOR.message_types_by_name["SubmitJobRequest"] = _SUBMITJOBREQUEST -DESCRIPTOR.message_types_by_name["GetJobRequest"] = _GETJOBREQUEST -DESCRIPTOR.message_types_by_name["ListJobsRequest"] = _LISTJOBSREQUEST -DESCRIPTOR.message_types_by_name["UpdateJobRequest"] = _UPDATEJOBREQUEST -DESCRIPTOR.message_types_by_name["ListJobsResponse"] = _LISTJOBSRESPONSE -DESCRIPTOR.message_types_by_name["CancelJobRequest"] = _CANCELJOBREQUEST -DESCRIPTOR.message_types_by_name["DeleteJobRequest"] = _DELETEJOBREQUEST -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -LoggingConfig = _reflection.GeneratedProtocolMessageType( - "LoggingConfig", - (_message.Message,), - { - "DriverLogLevelsEntry": _reflection.GeneratedProtocolMessageType( - "DriverLogLevelsEntry", - (_message.Message,), - { - "DESCRIPTOR": _LOGGINGCONFIG_DRIVERLOGLEVELSENTRY, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.LoggingConfig.DriverLogLevelsEntry) - }, - ), - "DESCRIPTOR": _LOGGINGCONFIG, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2", - "__doc__": """The runtime logging config of the job. - - Attributes: - driver_log_levels: - The per-package log levels for the driver. This may include - “root” package name to configure rootLogger. Examples: - ‘com.google = FATAL’, ‘root = INFO’, ‘org.apache = DEBUG’ - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.LoggingConfig) - }, -) -_sym_db.RegisterMessage(LoggingConfig) -_sym_db.RegisterMessage(LoggingConfig.DriverLogLevelsEntry) - -HadoopJob = _reflection.GeneratedProtocolMessageType( - "HadoopJob", - (_message.Message,), - { - "PropertiesEntry": _reflection.GeneratedProtocolMessageType( - "PropertiesEntry", - (_message.Message,), - { - "DESCRIPTOR": _HADOOPJOB_PROPERTIESENTRY, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.HadoopJob.PropertiesEntry) - }, - ), - "DESCRIPTOR": _HADOOPJOB, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2", - "__doc__": """A Dataproc job for running `Apache Hadoop MapReduce - `__ jobs on - `Apache Hadoop YARN `__. - - Attributes: - driver: - Required. Indicates the location of the driver’s main class. - Specify either the jar file that contains the main class or - the main class name. To specify both, add the jar file to - ``jar_file_uris``, and then specify the main class name in - this property. - main_jar_file_uri: - The HCFS URI of the jar file containing the main class. - Examples: ‘gs://foo-bucket/analytics-binaries/extract-useful- - metrics-mr.jar’ ‘hdfs:/tmp/test-samples/custom-wordcount.jar’ - ‘file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce- - examples.jar’ - main_class: - The name of the driver’s main class. 
The jar file containing - the class must be in the default CLASSPATH or specified in - ``jar_file_uris``. - args: - Optional. The arguments to pass to the driver. Do not include - arguments, such as ``-libjars`` or ``-Dfoo=bar``, that can be - set as job properties, since a collision may occur that causes - an incorrect job submission. - jar_file_uris: - Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop - driver and tasks. - file_uris: - Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to - be copied to the working directory of Hadoop drivers and - distributed tasks. Useful for naively parallel tasks. - archive_uris: - Optional. HCFS URIs of archives to be extracted in the working - directory of Hadoop drivers and tasks. Supported file types: - .jar, .tar, .tar.gz, .tgz, or .zip. - properties: - Optional. A mapping of property names to values, used to - configure Hadoop. Properties that conflict with values set by - the Dataproc API may be overwritten. Can include properties - set in ``/etc/hadoop/conf/*-site`` and classes in user code. - logging_config: - Optional. The runtime log config for job execution. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.HadoopJob) - }, -) -_sym_db.RegisterMessage(HadoopJob) -_sym_db.RegisterMessage(HadoopJob.PropertiesEntry) - -SparkJob = _reflection.GeneratedProtocolMessageType( - "SparkJob", - (_message.Message,), - { - "PropertiesEntry": _reflection.GeneratedProtocolMessageType( - "PropertiesEntry", - (_message.Message,), - { - "DESCRIPTOR": _SPARKJOB_PROPERTIESENTRY, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.SparkJob.PropertiesEntry) - }, - ), - "DESCRIPTOR": _SPARKJOB, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2", - "__doc__": """A Dataproc job for running `Apache Spark `__ - applications on YARN. The specification of the main method to call to - drive the job. Specify either the jar file that contains the main - class or the main class name. To pass both a main jar and a main class - in that jar, add the jar to ``CommonJob.jar_file_uris``, and then - specify the main class name in ``main_class``. - - Attributes: - main_jar_file_uri: - The HCFS URI of the jar file that contains the main class. - main_class: - The name of the driver’s main class. The jar file that - contains the class must be in the default CLASSPATH or - specified in ``jar_file_uris``. - args: - Optional. The arguments to pass to the driver. Do not include - arguments, such as ``--conf``, that can be set as job - properties, since a collision may occur that causes an - incorrect job submission. - jar_file_uris: - Optional. HCFS URIs of jar files to add to the CLASSPATHs of - the Spark driver and tasks. - file_uris: - Optional. HCFS URIs of files to be copied to the working - directory of Spark drivers and distributed tasks. Useful for - naively parallel tasks. - archive_uris: - Optional. HCFS URIs of archives to be extracted in the working - directory of Spark drivers and tasks. Supported file types: - .jar, .tar, .tar.gz, .tgz, and .zip. - properties: - Optional. A mapping of property names to values, used to - configure Spark. Properties that conflict with values set by - the Dataproc API may be overwritten. Can include properties - set in /etc/spark/conf/spark-defaults.conf and classes in user - code. - logging_config: - Optional. The runtime log config for job execution. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.SparkJob) - }, -) -_sym_db.RegisterMessage(SparkJob) -_sym_db.RegisterMessage(SparkJob.PropertiesEntry) - -PySparkJob = _reflection.GeneratedProtocolMessageType( - "PySparkJob", - (_message.Message,), - { - "PropertiesEntry": _reflection.GeneratedProtocolMessageType( - "PropertiesEntry", - (_message.Message,), - { - "DESCRIPTOR": _PYSPARKJOB_PROPERTIESENTRY, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.PySparkJob.PropertiesEntry) - }, - ), - "DESCRIPTOR": _PYSPARKJOB, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2", - "__doc__": """A Dataproc job for running `Apache PySpark - `__ - applications on YARN. - - Attributes: - main_python_file_uri: - Required. The HCFS URI of the main Python file to use as the - driver. Must be a .py file. - args: - Optional. The arguments to pass to the driver. Do not include - arguments, such as ``--conf``, that can be set as job - properties, since a collision may occur that causes an - incorrect job submission. - python_file_uris: - Optional. HCFS file URIs of Python files to pass to the - PySpark framework. Supported file types: .py, .egg, and .zip. - jar_file_uris: - Optional. HCFS URIs of jar files to add to the CLASSPATHs of - the Python driver and tasks. - file_uris: - Optional. HCFS URIs of files to be copied to the working - directory of Python drivers and distributed tasks. Useful for - naively parallel tasks. - archive_uris: - Optional. HCFS URIs of archives to be extracted in the working - directory of .jar, .tar, .tar.gz, .tgz, and .zip. - properties: - Optional. A mapping of property names to values, used to - configure PySpark. Properties that conflict with values set by - the Dataproc API may be overwritten. Can include properties - set in /etc/spark/conf/spark-defaults.conf and classes in user - code. - logging_config: - Optional. The runtime log config for job execution. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.PySparkJob) - }, -) -_sym_db.RegisterMessage(PySparkJob) -_sym_db.RegisterMessage(PySparkJob.PropertiesEntry) - -QueryList = _reflection.GeneratedProtocolMessageType( - "QueryList", - (_message.Message,), - { - "DESCRIPTOR": _QUERYLIST, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2", - "__doc__": """A list of queries to run on a cluster. - - Attributes: - queries: - Required. The queries to execute. You do not need to terminate - a query with a semicolon. Multiple queries can be specified in - one string by separating each with a semicolon. 
Here is an - example of an Cloud Dataproc API snippet that uses a QueryList - to specify a HiveJob: :: "hiveJob": { "queryList": { - "queries": [ "query1", "query2", - "query3;query4", ] } } - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.QueryList) - }, -) -_sym_db.RegisterMessage(QueryList) - -HiveJob = _reflection.GeneratedProtocolMessageType( - "HiveJob", - (_message.Message,), - { - "ScriptVariablesEntry": _reflection.GeneratedProtocolMessageType( - "ScriptVariablesEntry", - (_message.Message,), - { - "DESCRIPTOR": _HIVEJOB_SCRIPTVARIABLESENTRY, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.HiveJob.ScriptVariablesEntry) - }, - ), - "PropertiesEntry": _reflection.GeneratedProtocolMessageType( - "PropertiesEntry", - (_message.Message,), - { - "DESCRIPTOR": _HIVEJOB_PROPERTIESENTRY, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.HiveJob.PropertiesEntry) - }, - ), - "DESCRIPTOR": _HIVEJOB, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2", - "__doc__": """A Dataproc job for running `Apache Hive `__ - queries on YARN. - - Attributes: - queries: - Required. The sequence of Hive queries to execute, specified - as either an HCFS file URI or a list of queries. - query_file_uri: - The HCFS URI of the script that contains Hive queries. - query_list: - A list of queries. - continue_on_failure: - Optional. Whether to continue executing queries if a query - fails. The default value is ``false``. Setting to ``true`` can - be useful when executing independent parallel queries. - script_variables: - Optional. Mapping of query variable names to values - (equivalent to the Hive command: ``SET name="value";``). - properties: - Optional. A mapping of property names and values, used to - configure Hive. Properties that conflict with values set by - the Dataproc API may be overwritten. Can include properties - set in ``/etc/hadoop/conf/*-site``.xml, /etc/hive/conf/hive- - site.xml, and classes in user code. - jar_file_uris: - Optional. HCFS URIs of jar files to add to the CLASSPATH of - the Hive server and Hadoop MapReduce (MR) tasks. Can contain - Hive SerDes and UDFs. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.HiveJob) - }, -) -_sym_db.RegisterMessage(HiveJob) -_sym_db.RegisterMessage(HiveJob.ScriptVariablesEntry) -_sym_db.RegisterMessage(HiveJob.PropertiesEntry) - -SparkSqlJob = _reflection.GeneratedProtocolMessageType( - "SparkSqlJob", - (_message.Message,), - { - "ScriptVariablesEntry": _reflection.GeneratedProtocolMessageType( - "ScriptVariablesEntry", - (_message.Message,), - { - "DESCRIPTOR": _SPARKSQLJOB_SCRIPTVARIABLESENTRY, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.SparkSqlJob.ScriptVariablesEntry) - }, - ), - "PropertiesEntry": _reflection.GeneratedProtocolMessageType( - "PropertiesEntry", - (_message.Message,), - { - "DESCRIPTOR": _SPARKSQLJOB_PROPERTIESENTRY, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.SparkSqlJob.PropertiesEntry) - }, - ), - "DESCRIPTOR": _SPARKSQLJOB, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2", - "__doc__": """A Dataproc job for running `Apache Spark SQL - `__ queries. - - Attributes: - queries: - Required. 
The sequence of Spark SQL queries to execute, - specified as either an HCFS file URI or as a list of queries. - query_file_uri: - The HCFS URI of the script that contains SQL queries. - query_list: - A list of queries. - script_variables: - Optional. Mapping of query variable names to values - (equivalent to the Spark SQL command: SET ``name="value";``). - properties: - Optional. A mapping of property names to values, used to - configure Spark SQL’s SparkConf. Properties that conflict with - values set by the Dataproc API may be overwritten. - jar_file_uris: - Optional. HCFS URIs of jar files to be added to the Spark - CLASSPATH. - logging_config: - Optional. The runtime log config for job execution. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.SparkSqlJob) - }, -) -_sym_db.RegisterMessage(SparkSqlJob) -_sym_db.RegisterMessage(SparkSqlJob.ScriptVariablesEntry) -_sym_db.RegisterMessage(SparkSqlJob.PropertiesEntry) - -PigJob = _reflection.GeneratedProtocolMessageType( - "PigJob", - (_message.Message,), - { - "ScriptVariablesEntry": _reflection.GeneratedProtocolMessageType( - "ScriptVariablesEntry", - (_message.Message,), - { - "DESCRIPTOR": _PIGJOB_SCRIPTVARIABLESENTRY, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.PigJob.ScriptVariablesEntry) - }, - ), - "PropertiesEntry": _reflection.GeneratedProtocolMessageType( - "PropertiesEntry", - (_message.Message,), - { - "DESCRIPTOR": _PIGJOB_PROPERTIESENTRY, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.PigJob.PropertiesEntry) - }, - ), - "DESCRIPTOR": _PIGJOB, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2", - "__doc__": """A Dataproc job for running `Apache Pig `__ - queries on YARN. - - Attributes: - queries: - Required. The sequence of Pig queries to execute, specified as - an HCFS file URI or a list of queries. - query_file_uri: - The HCFS URI of the script that contains the Pig queries. - query_list: - A list of queries. - continue_on_failure: - Optional. Whether to continue executing queries if a query - fails. The default value is ``false``. Setting to ``true`` can - be useful when executing independent parallel queries. - script_variables: - Optional. Mapping of query variable names to values - (equivalent to the Pig command: ``name=[value]``). - properties: - Optional. A mapping of property names to values, used to - configure Pig. Properties that conflict with values set by the - Dataproc API may be overwritten. Can include properties set in - ``/etc/hadoop/conf/*-site``.xml, /etc/pig/conf/pig.properties, and - classes in user code. - jar_file_uris: - Optional. HCFS URIs of jar files to add to the CLASSPATH of - the Pig Client and Hadoop MapReduce (MR) tasks. Can contain - Pig UDFs. - logging_config: - Optional. The runtime log config for job execution. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.PigJob) - }, -) -_sym_db.RegisterMessage(PigJob) -_sym_db.RegisterMessage(PigJob.ScriptVariablesEntry) -_sym_db.RegisterMessage(PigJob.PropertiesEntry) - -SparkRJob = _reflection.GeneratedProtocolMessageType( - "SparkRJob", - (_message.Message,), - { - "PropertiesEntry": _reflection.GeneratedProtocolMessageType( - "PropertiesEntry", - (_message.Message,), - { - "DESCRIPTOR": _SPARKRJOB_PROPERTIESENTRY, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.SparkRJob.PropertiesEntry) - }, - ), - "DESCRIPTOR": _SPARKRJOB, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2", - "__doc__": """A Dataproc job for running `Apache SparkR - `__ applications on - YARN. - - Attributes: - main_r_file_uri: - Required. The HCFS URI of the main R file to use as the - driver. Must be a .R file. - args: - Optional. The arguments to pass to the driver. Do not include - arguments, such as ``--conf``, that can be set as job - properties, since a collision may occur that causes an - incorrect job submission. - file_uris: - Optional. HCFS URIs of files to be copied to the working - directory of R drivers and distributed tasks. Useful for - naively parallel tasks. - archive_uris: - Optional. HCFS URIs of archives to be extracted in the working - directory of Spark drivers and tasks. Supported file types: - .jar, .tar, .tar.gz, .tgz, and .zip. - properties: - Optional. A mapping of property names to values, used to - configure SparkR. Properties that conflict with values set by - the Dataproc API may be overwritten. Can include properties - set in /etc/spark/conf/spark-defaults.conf and classes in user - code. - logging_config: - Optional. The runtime log config for job execution. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.SparkRJob) - }, -) -_sym_db.RegisterMessage(SparkRJob) -_sym_db.RegisterMessage(SparkRJob.PropertiesEntry) - -PrestoJob = _reflection.GeneratedProtocolMessageType( - "PrestoJob", - (_message.Message,), - { - "PropertiesEntry": _reflection.GeneratedProtocolMessageType( - "PropertiesEntry", - (_message.Message,), - { - "DESCRIPTOR": _PRESTOJOB_PROPERTIESENTRY, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.PrestoJob.PropertiesEntry) - }, - ), - "DESCRIPTOR": _PRESTOJOB, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2", - "__doc__": """A Dataproc job for running `Presto `__ queries. - **IMPORTANT**: The `Dataproc Presto Optional Component - `__ - must be enabled when the cluster is created to submit a Presto job to - the cluster. - - Attributes: - queries: - Required. The sequence of Presto queries to execute, specified - as either an HCFS file URI or as a list of queries. - query_file_uri: - The HCFS URI of the script that contains SQL queries. - query_list: - A list of queries. - continue_on_failure: - Optional. Whether to continue executing queries if a query - fails. The default value is ``false``. Setting to ``true`` can - be useful when executing independent parallel queries. - output_format: - Optional. The format in which query output will be displayed. - See the Presto documentation for supported output formats - client_tags: - Optional. Presto client tags to attach to this query - properties: - Optional. A mapping of property names to values. 
Used to set - Presto `session properties - `__ - Equivalent to using the –session flag in the Presto CLI - logging_config: - Optional. The runtime log config for job execution. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.PrestoJob) - }, -) -_sym_db.RegisterMessage(PrestoJob) -_sym_db.RegisterMessage(PrestoJob.PropertiesEntry) - -JobPlacement = _reflection.GeneratedProtocolMessageType( - "JobPlacement", - (_message.Message,), - { - "DESCRIPTOR": _JOBPLACEMENT, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2", - "__doc__": """Dataproc job config. - - Attributes: - cluster_name: - Required. The name of the cluster where the job will be - submitted. - cluster_uuid: - Output only. A cluster UUID generated by the Dataproc service - when the job is submitted. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.JobPlacement) - }, -) -_sym_db.RegisterMessage(JobPlacement) - -JobStatus = _reflection.GeneratedProtocolMessageType( - "JobStatus", - (_message.Message,), - { - "DESCRIPTOR": _JOBSTATUS, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2", - "__doc__": """Dataproc job status. - - Attributes: - state: - Output only. A state message specifying the overall job state. - details: - Output only. Optional Job state details, such as an error - description if the state is ERROR. - state_start_time: - Output only. The time when this state was entered. - substate: - Output only. Additional state information, which includes - status reported by the agent. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.JobStatus) - }, -) -_sym_db.RegisterMessage(JobStatus) - -JobReference = _reflection.GeneratedProtocolMessageType( - "JobReference", - (_message.Message,), - { - "DESCRIPTOR": _JOBREFERENCE, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2", - "__doc__": """Encapsulates the full scoping used to reference a job. - - Attributes: - project_id: - Required. The ID of the Google Cloud Platform project that the - job belongs to. - job_id: - Optional. The job ID, which must be unique within the project. - The ID must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), or hyphens (-). The maximum length is 100 - characters. If not specified by the caller, the job ID will - be provided by the server. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.JobReference) - }, -) -_sym_db.RegisterMessage(JobReference) - -YarnApplication = _reflection.GeneratedProtocolMessageType( - "YarnApplication", - (_message.Message,), - { - "DESCRIPTOR": _YARNAPPLICATION, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2", - "__doc__": """A YARN application created by a job. Application information is a - subset of - org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. - **Beta Feature**: This report is available for testing purposes only. - It may be changed before final release. - - Attributes: - name: - Output only. The application name. - state: - Output only. The application state. - progress: - Output only. The numerical progress of the application, from 1 - to 100. - tracking_url: - Output only. The HTTP URL of the ApplicationMaster, - HistoryServer, or TimelineServer that provides application- - specific information. The URL uses the internal hostname, and - requires a proxy server for resolution and, possibly, access. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.YarnApplication) - }, -) -_sym_db.RegisterMessage(YarnApplication) - -Job = _reflection.GeneratedProtocolMessageType( - "Job", - (_message.Message,), - { - "LabelsEntry": _reflection.GeneratedProtocolMessageType( - "LabelsEntry", - (_message.Message,), - { - "DESCRIPTOR": _JOB_LABELSENTRY, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.Job.LabelsEntry) - }, - ), - "DESCRIPTOR": _JOB, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2", - "__doc__": """A Dataproc job resource. - - Attributes: - reference: - Optional. The fully qualified reference to the job, which can - be used to obtain the equivalent REST path of the job - resource. If this property is not specified when a job is - created, the server generates a job_id. - placement: - Required. Job information, including how, when, and where to - run the job. - type_job: - Required. The application/framework-specific portion of the - job. - hadoop_job: - Optional. Job is a Hadoop job. - spark_job: - Optional. Job is a Spark job. - pyspark_job: - Optional. Job is a PySpark job. - hive_job: - Optional. Job is a Hive job. - pig_job: - Optional. Job is a Pig job. - spark_r_job: - Optional. Job is a SparkR job. - spark_sql_job: - Optional. Job is a SparkSql job. - presto_job: - Optional. Job is a Presto job. - status: - Output only. The job status. Additional application-specific - status information may be contained in the type_job and - yarn_applications fields. - status_history: - Output only. The previous job status. - yarn_applications: - Output only. The collection of YARN applications spun up by - this job. **Beta** Feature: This report is available for - testing purposes only. It may be changed before final release. - submitted_by: - Output only. The email address of the user submitting the job. - For jobs submitted on the cluster, the address is - username@hostname. - driver_output_resource_uri: - Output only. A URI pointing to the location of the stdout of - the job’s driver program. - driver_control_files_uri: - Output only. If present, the location of miscellaneous control - files which may be used as part of job setup and handling. If - not present, control files may be placed in the same location - as ``driver_output_uri``. - labels: - Optional. The labels to associate with this job. Label - **keys** must contain 1 to 63 characters, and must conform to - `RFC 1035 `__. Label - **values** may be empty, but, if present, must contain 1 to 63 - characters, and must conform to `RFC 1035 - `__. No more than 32 - labels can be associated with a job. - scheduling: - Optional. Job scheduling configuration. - job_uuid: - Output only. A UUID that uniquely identifies a job within the - project over time. This is in contrast to a user-settable - reference.job_id that may be reused over time. - done: - Output only. Indicates whether the job is completed. If the - value is ``false``, the job is still in progress. If ``true``, - the job is completed, and ``status.state`` field will indicate - if it was successful, failed, or cancelled. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.Job) - }, -) -_sym_db.RegisterMessage(Job) -_sym_db.RegisterMessage(Job.LabelsEntry) - -JobScheduling = _reflection.GeneratedProtocolMessageType( - "JobScheduling", - (_message.Message,), - { - "DESCRIPTOR": _JOBSCHEDULING, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2", - "__doc__": """Job scheduling options. - - Attributes: - max_failures_per_hour: - Optional. Maximum number of times per hour a driver may be - restarted as a result of driver terminating with non-zero code - before job is reported failed. A job may be reported as - thrashing if driver exits with non-zero code 4 times within 10 - minute window. Maximum value is 10. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.JobScheduling) - }, -) -_sym_db.RegisterMessage(JobScheduling) - -JobMetadata = _reflection.GeneratedProtocolMessageType( - "JobMetadata", - (_message.Message,), - { - "DESCRIPTOR": _JOBMETADATA, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2", - "__doc__": """Job Operation metadata. - - Attributes: - job_id: - Output only. The job id. - status: - Output only. Most recent job status. - operation_type: - Output only. Operation type. - start_time: - Output only. Job submission time. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.JobMetadata) - }, -) -_sym_db.RegisterMessage(JobMetadata) - -SubmitJobRequest = _reflection.GeneratedProtocolMessageType( - "SubmitJobRequest", - (_message.Message,), - { - "DESCRIPTOR": _SUBMITJOBREQUEST, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2", - "__doc__": """A request to submit a job. - - Attributes: - project_id: - Required. The ID of the Google Cloud Platform project that the - job belongs to. - region: - Required. The Dataproc region in which to handle the request. - job: - Required. The job resource. - request_id: - Optional. A unique id used to identify the request. If the - server receives two [SubmitJobRequest][google.cloud.dataproc.v - 1beta2.SubmitJobRequest] requests with the same id, then the - second request will be ignored and the first - [Job][google.cloud.dataproc.v1beta2.Job] created and stored in - the backend is returned. It is recommended to always set this - value to a `UUID `__. The id must contain only letters (a-z, - A-Z), numbers (0-9), underscores (_), and hyphens (-). The - maximum length is 40 characters. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.SubmitJobRequest) - }, -) -_sym_db.RegisterMessage(SubmitJobRequest) - -GetJobRequest = _reflection.GeneratedProtocolMessageType( - "GetJobRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETJOBREQUEST, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2", - "__doc__": """A request to get the resource representation for a job in a project. - - Attributes: - project_id: - Required. The ID of the Google Cloud Platform project that the - job belongs to. - region: - Required. The Dataproc region in which to handle the request. - job_id: - Required. The job ID. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.GetJobRequest) - }, -) -_sym_db.RegisterMessage(GetJobRequest) - -ListJobsRequest = _reflection.GeneratedProtocolMessageType( - "ListJobsRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTJOBSREQUEST, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2", - "__doc__": """A request to list jobs in a project. 
- - Attributes: - project_id: - Required. The ID of the Google Cloud Platform project that the - job belongs to. - region: - Required. The Dataproc region in which to handle the request. - page_size: - Optional. The number of results to return in each response. - page_token: - Optional. The page token, returned by a previous call, to - request the next page of results. - cluster_name: - Optional. If set, the returned jobs list includes only jobs - that were submitted to the named cluster. - job_state_matcher: - Optional. Specifies enumerated categories of jobs to list. - (default = match ALL jobs). If ``filter`` is provided, - ``jobStateMatcher`` will be ignored. - filter: - Optional. A filter constraining the jobs to list. Filters are - case-sensitive and have the following syntax: [field = value] - AND [field [= value]] … where **field** is ``status.state`` - or ``labels.[KEY]``, and ``[KEY]`` is a label key. **value** - can be ``*`` to match all values. ``status.state`` can be - either ``ACTIVE`` or ``NON_ACTIVE``. Only the logical ``AND`` - operator is supported; space-separated items are treated as - having an implicit ``AND`` operator. Example filter: - status.state = ACTIVE AND labels.env = staging AND - labels.starred = \* - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ListJobsRequest) - }, -) -_sym_db.RegisterMessage(ListJobsRequest) - -UpdateJobRequest = _reflection.GeneratedProtocolMessageType( - "UpdateJobRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEJOBREQUEST, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2", - "__doc__": """A request to update a job. - - Attributes: - project_id: - Required. The ID of the Google Cloud Platform project that the - job belongs to. - region: - Required. The Dataproc region in which to handle the request. - job_id: - Required. The job ID. - job: - Required. The changes to the job. - update_mask: - Required. Specifies the path, relative to Job, of the field to - update. For example, to update the labels of a Job the - update_mask parameter would be specified as labels, and the - ``PATCH`` request body would specify the new value. Note: - Currently, labels is the only field that can be updated. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.UpdateJobRequest) - }, -) -_sym_db.RegisterMessage(UpdateJobRequest) - -ListJobsResponse = _reflection.GeneratedProtocolMessageType( - "ListJobsResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTJOBSRESPONSE, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2", - "__doc__": """A list of jobs in a project. - - Attributes: - jobs: - Output only. Jobs list. - next_page_token: - Optional. This token is included in the response if there are - more results to fetch. To fetch additional results, provide - this value as the ``page_token`` in a subsequent - ListJobsRequest. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ListJobsResponse) - }, -) -_sym_db.RegisterMessage(ListJobsResponse) - -CancelJobRequest = _reflection.GeneratedProtocolMessageType( - "CancelJobRequest", - (_message.Message,), - { - "DESCRIPTOR": _CANCELJOBREQUEST, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2", - "__doc__": """A request to cancel a job. - - Attributes: - project_id: - Required. The ID of the Google Cloud Platform project that the - job belongs to. - region: - Required. The Dataproc region in which to handle the request. - job_id: - Required. The job ID. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.CancelJobRequest) - }, -) -_sym_db.RegisterMessage(CancelJobRequest) - -DeleteJobRequest = _reflection.GeneratedProtocolMessageType( - "DeleteJobRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETEJOBREQUEST, - "__module__": "google.cloud.dataproc_v1beta2.proto.jobs_pb2", - "__doc__": """A request to delete a job. - - Attributes: - project_id: - Required. The ID of the Google Cloud Platform project that the - job belongs to. - region: - Required. The Dataproc region in which to handle the request. - job_id: - Required. The job ID. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.DeleteJobRequest) - }, -) -_sym_db.RegisterMessage(DeleteJobRequest) - - -DESCRIPTOR._options = None -_LOGGINGCONFIG_DRIVERLOGLEVELSENTRY._options = None -_HADOOPJOB_PROPERTIESENTRY._options = None -_HADOOPJOB.fields_by_name["args"]._options = None -_HADOOPJOB.fields_by_name["jar_file_uris"]._options = None -_HADOOPJOB.fields_by_name["file_uris"]._options = None -_HADOOPJOB.fields_by_name["archive_uris"]._options = None -_HADOOPJOB.fields_by_name["properties"]._options = None -_HADOOPJOB.fields_by_name["logging_config"]._options = None -_SPARKJOB_PROPERTIESENTRY._options = None -_SPARKJOB.fields_by_name["args"]._options = None -_SPARKJOB.fields_by_name["jar_file_uris"]._options = None -_SPARKJOB.fields_by_name["file_uris"]._options = None -_SPARKJOB.fields_by_name["archive_uris"]._options = None -_SPARKJOB.fields_by_name["properties"]._options = None -_SPARKJOB.fields_by_name["logging_config"]._options = None -_PYSPARKJOB_PROPERTIESENTRY._options = None -_PYSPARKJOB.fields_by_name["main_python_file_uri"]._options = None -_PYSPARKJOB.fields_by_name["args"]._options = None -_PYSPARKJOB.fields_by_name["python_file_uris"]._options = None -_PYSPARKJOB.fields_by_name["jar_file_uris"]._options = None -_PYSPARKJOB.fields_by_name["file_uris"]._options = None -_PYSPARKJOB.fields_by_name["archive_uris"]._options = None -_PYSPARKJOB.fields_by_name["properties"]._options = None -_PYSPARKJOB.fields_by_name["logging_config"]._options = None -_QUERYLIST.fields_by_name["queries"]._options = None -_HIVEJOB_SCRIPTVARIABLESENTRY._options = None -_HIVEJOB_PROPERTIESENTRY._options = None -_HIVEJOB.fields_by_name["continue_on_failure"]._options = None -_HIVEJOB.fields_by_name["script_variables"]._options = None -_HIVEJOB.fields_by_name["properties"]._options = None -_HIVEJOB.fields_by_name["jar_file_uris"]._options = None -_SPARKSQLJOB_SCRIPTVARIABLESENTRY._options = None -_SPARKSQLJOB_PROPERTIESENTRY._options = None -_SPARKSQLJOB.fields_by_name["script_variables"]._options = None -_SPARKSQLJOB.fields_by_name["properties"]._options = None -_SPARKSQLJOB.fields_by_name["jar_file_uris"]._options = None -_SPARKSQLJOB.fields_by_name["logging_config"]._options = None -_PIGJOB_SCRIPTVARIABLESENTRY._options = None -_PIGJOB_PROPERTIESENTRY._options = None -_PIGJOB.fields_by_name["continue_on_failure"]._options = None -_PIGJOB.fields_by_name["script_variables"]._options = None -_PIGJOB.fields_by_name["properties"]._options = None -_PIGJOB.fields_by_name["jar_file_uris"]._options = None -_PIGJOB.fields_by_name["logging_config"]._options = None -_SPARKRJOB_PROPERTIESENTRY._options = None -_SPARKRJOB.fields_by_name["main_r_file_uri"]._options = None -_SPARKRJOB.fields_by_name["args"]._options = None -_SPARKRJOB.fields_by_name["file_uris"]._options = None -_SPARKRJOB.fields_by_name["archive_uris"]._options = None 
-_SPARKRJOB.fields_by_name["properties"]._options = None -_SPARKRJOB.fields_by_name["logging_config"]._options = None -_PRESTOJOB_PROPERTIESENTRY._options = None -_PRESTOJOB.fields_by_name["continue_on_failure"]._options = None -_PRESTOJOB.fields_by_name["output_format"]._options = None -_PRESTOJOB.fields_by_name["client_tags"]._options = None -_PRESTOJOB.fields_by_name["properties"]._options = None -_PRESTOJOB.fields_by_name["logging_config"]._options = None -_JOBPLACEMENT.fields_by_name["cluster_name"]._options = None -_JOBPLACEMENT.fields_by_name["cluster_uuid"]._options = None -_JOBSTATUS.fields_by_name["state"]._options = None -_JOBSTATUS.fields_by_name["details"]._options = None -_JOBSTATUS.fields_by_name["state_start_time"]._options = None -_JOBSTATUS.fields_by_name["substate"]._options = None -_JOBREFERENCE.fields_by_name["project_id"]._options = None -_JOBREFERENCE.fields_by_name["job_id"]._options = None -_YARNAPPLICATION.fields_by_name["name"]._options = None -_YARNAPPLICATION.fields_by_name["state"]._options = None -_YARNAPPLICATION.fields_by_name["progress"]._options = None -_YARNAPPLICATION.fields_by_name["tracking_url"]._options = None -_JOB_LABELSENTRY._options = None -_JOB.fields_by_name["reference"]._options = None -_JOB.fields_by_name["placement"]._options = None -_JOB.fields_by_name["hadoop_job"]._options = None -_JOB.fields_by_name["spark_job"]._options = None -_JOB.fields_by_name["pyspark_job"]._options = None -_JOB.fields_by_name["hive_job"]._options = None -_JOB.fields_by_name["pig_job"]._options = None -_JOB.fields_by_name["spark_r_job"]._options = None -_JOB.fields_by_name["spark_sql_job"]._options = None -_JOB.fields_by_name["presto_job"]._options = None -_JOB.fields_by_name["status"]._options = None -_JOB.fields_by_name["status_history"]._options = None -_JOB.fields_by_name["yarn_applications"]._options = None -_JOB.fields_by_name["submitted_by"]._options = None -_JOB.fields_by_name["driver_output_resource_uri"]._options = None -_JOB.fields_by_name["driver_control_files_uri"]._options = None -_JOB.fields_by_name["labels"]._options = None -_JOB.fields_by_name["scheduling"]._options = None -_JOB.fields_by_name["job_uuid"]._options = None -_JOB.fields_by_name["done"]._options = None -_JOBSCHEDULING.fields_by_name["max_failures_per_hour"]._options = None -_JOBMETADATA.fields_by_name["job_id"]._options = None -_JOBMETADATA.fields_by_name["status"]._options = None -_JOBMETADATA.fields_by_name["operation_type"]._options = None -_JOBMETADATA.fields_by_name["start_time"]._options = None -_SUBMITJOBREQUEST.fields_by_name["project_id"]._options = None -_SUBMITJOBREQUEST.fields_by_name["region"]._options = None -_SUBMITJOBREQUEST.fields_by_name["job"]._options = None -_SUBMITJOBREQUEST.fields_by_name["request_id"]._options = None -_GETJOBREQUEST.fields_by_name["project_id"]._options = None -_GETJOBREQUEST.fields_by_name["region"]._options = None -_GETJOBREQUEST.fields_by_name["job_id"]._options = None -_LISTJOBSREQUEST.fields_by_name["project_id"]._options = None -_LISTJOBSREQUEST.fields_by_name["region"]._options = None -_LISTJOBSREQUEST.fields_by_name["page_size"]._options = None -_LISTJOBSREQUEST.fields_by_name["page_token"]._options = None -_LISTJOBSREQUEST.fields_by_name["cluster_name"]._options = None -_LISTJOBSREQUEST.fields_by_name["job_state_matcher"]._options = None -_LISTJOBSREQUEST.fields_by_name["filter"]._options = None -_UPDATEJOBREQUEST.fields_by_name["project_id"]._options = None -_UPDATEJOBREQUEST.fields_by_name["region"]._options = None 
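# A minimal sketch (assuming the pre-migration google.cloud.dataproc_v1beta2.proto
# layout plus googleapis-common-protos): the serialized_options byte strings used
# throughout this file, e.g. b"\340A\002", b"\340A\001" and b"\340A\003", carry the
# google.api.field_behavior annotations (REQUIRED, OPTIONAL and OUTPUT_ONLY
# respectively) and can be read back from a field descriptor via the extension.
from google.api import field_behavior_pb2
from google.cloud.dataproc_v1beta2.proto import jobs_pb2

project_id_field = jobs_pb2.SubmitJobRequest.DESCRIPTOR.fields_by_name["project_id"]
behaviors = project_id_field.GetOptions().Extensions[field_behavior_pb2.field_behavior]
assert field_behavior_pb2.REQUIRED in behaviors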
-_UPDATEJOBREQUEST.fields_by_name["job_id"]._options = None -_UPDATEJOBREQUEST.fields_by_name["job"]._options = None -_UPDATEJOBREQUEST.fields_by_name["update_mask"]._options = None -_LISTJOBSRESPONSE.fields_by_name["jobs"]._options = None -_LISTJOBSRESPONSE.fields_by_name["next_page_token"]._options = None -_CANCELJOBREQUEST.fields_by_name["project_id"]._options = None -_CANCELJOBREQUEST.fields_by_name["region"]._options = None -_CANCELJOBREQUEST.fields_by_name["job_id"]._options = None -_DELETEJOBREQUEST.fields_by_name["project_id"]._options = None -_DELETEJOBREQUEST.fields_by_name["region"]._options = None -_DELETEJOBREQUEST.fields_by_name["job_id"]._options = None - -_JOBCONTROLLER = _descriptor.ServiceDescriptor( - name="JobController", - full_name="google.cloud.dataproc.v1beta2.JobController", - file=DESCRIPTOR, - index=0, - serialized_options=b"\312A\027dataproc.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", - create_key=_descriptor._internal_create_key, - serialized_start=7475, - serialized_end=8986, - methods=[ - _descriptor.MethodDescriptor( - name="SubmitJob", - full_name="google.cloud.dataproc.v1beta2.JobController.SubmitJob", - index=0, - containing_service=None, - input_type=_SUBMITJOBREQUEST, - output_type=_JOB, - serialized_options=b'\202\323\344\223\002@";/v1beta2/projects/{project_id}/regions/{region}/jobs:submit:\001*\332A\027project_id, region, job', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="SubmitJobAsOperation", - full_name="google.cloud.dataproc.v1beta2.JobController.SubmitJobAsOperation", - index=1, - containing_service=None, - input_type=_SUBMITJOBREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\002K"F/v1beta2/projects/{project_id}/regions/{region}/jobs:submitAsOperation:\001*\332A\027project_id, region, job\312A\022\n\003Job\022\013JobMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetJob", - full_name="google.cloud.dataproc.v1beta2.JobController.GetJob", - index=2, - containing_service=None, - input_type=_GETJOBREQUEST, - output_type=_JOB, - serialized_options=b"\202\323\344\223\002?\022=/v1beta2/projects/{project_id}/regions/{region}/jobs/{job_id}\332A\032project_id, region, job_id", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ListJobs", - full_name="google.cloud.dataproc.v1beta2.JobController.ListJobs", - index=3, - containing_service=None, - input_type=_LISTJOBSREQUEST, - output_type=_LISTJOBSRESPONSE, - serialized_options=b"\202\323\344\223\0026\0224/v1beta2/projects/{project_id}/regions/{region}/jobs\332A\022project_id, region\332A\032project_id, region, filter", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="UpdateJob", - full_name="google.cloud.dataproc.v1beta2.JobController.UpdateJob", - index=4, - containing_service=None, - input_type=_UPDATEJOBREQUEST, - output_type=_JOB, - serialized_options=b"\202\323\344\223\002D2=/v1beta2/projects/{project_id}/regions/{region}/jobs/{job_id}:\003job", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="CancelJob", - full_name="google.cloud.dataproc.v1beta2.JobController.CancelJob", - index=5, - containing_service=None, - input_type=_CANCELJOBREQUEST, - output_type=_JOB, - 
serialized_options=b'\202\323\344\223\002I"D/v1beta2/projects/{project_id}/regions/{region}/jobs/{job_id}:cancel:\001*\332A\032project_id, region, job_id', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeleteJob", - full_name="google.cloud.dataproc.v1beta2.JobController.DeleteJob", - index=6, - containing_service=None, - input_type=_DELETEJOBREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\002?*=/v1beta2/projects/{project_id}/regions/{region}/jobs/{job_id}\332A\032project_id, region, job_id", - create_key=_descriptor._internal_create_key, - ), - ], -) -_sym_db.RegisterServiceDescriptor(_JOBCONTROLLER) - -DESCRIPTOR.services_by_name["JobController"] = _JOBCONTROLLER - -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/dataproc_v1beta2/proto/jobs_pb2_grpc.py b/google/cloud/dataproc_v1beta2/proto/jobs_pb2_grpc.py deleted file mode 100644 index f1a19a55..00000000 --- a/google/cloud/dataproc_v1beta2/proto/jobs_pb2_grpc.py +++ /dev/null @@ -1,356 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -from google.cloud.dataproc_v1beta2.proto import ( - jobs_pb2 as google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2, -) -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -class JobControllerStub(object): - """The JobController provides methods to manage jobs. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. - """ - self.SubmitJob = channel.unary_unary( - "/google.cloud.dataproc.v1beta2.JobController/SubmitJob", - request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.SubmitJobRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.Job.FromString, - ) - self.SubmitJobAsOperation = channel.unary_unary( - "/google.cloud.dataproc.v1beta2.JobController/SubmitJobAsOperation", - request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.SubmitJobRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.GetJob = channel.unary_unary( - "/google.cloud.dataproc.v1beta2.JobController/GetJob", - request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.GetJobRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.Job.FromString, - ) - self.ListJobs = channel.unary_unary( - "/google.cloud.dataproc.v1beta2.JobController/ListJobs", - request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.ListJobsRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.ListJobsResponse.FromString, - ) - self.UpdateJob = channel.unary_unary( - "/google.cloud.dataproc.v1beta2.JobController/UpdateJob", - request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.UpdateJobRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.Job.FromString, - ) - self.CancelJob = channel.unary_unary( - "/google.cloud.dataproc.v1beta2.JobController/CancelJob", - 
request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.CancelJobRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.Job.FromString, - ) - self.DeleteJob = channel.unary_unary( - "/google.cloud.dataproc.v1beta2.JobController/DeleteJob", - request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.DeleteJobRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - - -class JobControllerServicer(object): - """The JobController provides methods to manage jobs. - """ - - def SubmitJob(self, request, context): - """Submits a job to a cluster. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def SubmitJobAsOperation(self, request, context): - """Submits job to a cluster. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetJob(self, request, context): - """Gets the resource representation for a job in a project. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListJobs(self, request, context): - """Lists regions/{region}/jobs in a project. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateJob(self, request, context): - """Updates a job in a project. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def CancelJob(self, request, context): - """Starts a job cancellation request. To access the job resource - after cancellation, call - [regions/{region}/jobs.list](https://cloud.google.com/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list) - or - [regions/{region}/jobs.get](https://cloud.google.com/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get). - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteJob(self, request, context): - """Deletes the job from the project. If the job is active, the delete fails, - and the response returns `FAILED_PRECONDITION`. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_JobControllerServicer_to_server(servicer, server): - rpc_method_handlers = { - "SubmitJob": grpc.unary_unary_rpc_method_handler( - servicer.SubmitJob, - request_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.SubmitJobRequest.FromString, - response_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.Job.SerializeToString, - ), - "SubmitJobAsOperation": grpc.unary_unary_rpc_method_handler( - servicer.SubmitJobAsOperation, - request_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.SubmitJobRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "GetJob": grpc.unary_unary_rpc_method_handler( - servicer.GetJob, - request_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.GetJobRequest.FromString, - response_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.Job.SerializeToString, - ), - "ListJobs": grpc.unary_unary_rpc_method_handler( - servicer.ListJobs, - request_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.ListJobsRequest.FromString, - response_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.ListJobsResponse.SerializeToString, - ), - "UpdateJob": grpc.unary_unary_rpc_method_handler( - servicer.UpdateJob, - request_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.UpdateJobRequest.FromString, - response_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.Job.SerializeToString, - ), - "CancelJob": grpc.unary_unary_rpc_method_handler( - servicer.CancelJob, - request_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.CancelJobRequest.FromString, - response_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.Job.SerializeToString, - ), - "DeleteJob": grpc.unary_unary_rpc_method_handler( - servicer.DeleteJob, - request_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.DeleteJobRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.cloud.dataproc.v1beta2.JobController", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) - - -# This class is part of an EXPERIMENTAL API. -class JobController(object): - """The JobController provides methods to manage jobs. 
- """ - - @staticmethod - def SubmitJob( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1beta2.JobController/SubmitJob", - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.SubmitJobRequest.SerializeToString, - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.Job.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def SubmitJobAsOperation( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1beta2.JobController/SubmitJobAsOperation", - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.SubmitJobRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetJob( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1beta2.JobController/GetJob", - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.GetJobRequest.SerializeToString, - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.Job.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def ListJobs( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1beta2.JobController/ListJobs", - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.ListJobsRequest.SerializeToString, - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.ListJobsResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def UpdateJob( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1beta2.JobController/UpdateJob", - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.UpdateJobRequest.SerializeToString, - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.Job.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def CancelJob( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1beta2.JobController/CancelJob", - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.CancelJobRequest.SerializeToString, - 
google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.Job.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DeleteJob( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1beta2.JobController/DeleteJob", - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.DeleteJobRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) diff --git a/google/cloud/dataproc_v1beta2/proto/operations_pb2.py b/google/cloud/dataproc_v1beta2/proto/operations_pb2.py deleted file mode 100644 index a4187389..00000000 --- a/google/cloud/dataproc_v1beta2/proto/operations_pb2.py +++ /dev/null @@ -1,501 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/cloud/dataproc_v1beta2/proto/operations.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/dataproc_v1beta2/proto/operations.proto", - package="google.cloud.dataproc.v1beta2", - syntax="proto3", - serialized_options=b"\n!com.google.cloud.dataproc.v1beta2B\017OperationsProtoP\001ZEgoogle.golang.org/genproto/googleapis/cloud/dataproc/v1beta2;dataproc", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n4google/cloud/dataproc_v1beta2/proto/operations.proto\x12\x1dgoogle.cloud.dataproc.v1beta2\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\xfa\x01\n\x16\x43lusterOperationStatus\x12J\n\x05state\x18\x01 \x01(\x0e\x32;.google.cloud.dataproc.v1beta2.ClusterOperationStatus.State\x12\x13\n\x0binner_state\x18\x02 \x01(\t\x12\x0f\n\x07\x64\x65tails\x18\x03 \x01(\t\x12\x34\n\x10state_start_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"8\n\x05State\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0b\n\x07PENDING\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\x08\n\x04\x44ONE\x10\x03"\x9f\x03\n\x18\x43lusterOperationMetadata\x12\x14\n\x0c\x63luster_name\x18\x07 \x01(\t\x12\x14\n\x0c\x63luster_uuid\x18\x08 \x01(\t\x12\x45\n\x06status\x18\t \x01(\x0b\x32\x35.google.cloud.dataproc.v1beta2.ClusterOperationStatus\x12M\n\x0estatus_history\x18\n \x03(\x0b\x32\x35.google.cloud.dataproc.v1beta2.ClusterOperationStatus\x12\x16\n\x0eoperation_type\x18\x0b \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x0c \x01(\t\x12S\n\x06labels\x18\r \x03(\x0b\x32\x43.google.cloud.dataproc.v1beta2.ClusterOperationMetadata.LabelsEntry\x12\x10\n\x08warnings\x18\x0e \x03(\t\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42}\n!com.google.cloud.dataproc.v1beta2B\x0fOperationsProtoP\x01ZEgoogle.golang.org/genproto/googleapis/cloud/dataproc/v1beta2;dataprocb\x06proto3', - dependencies=[ - 
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - ], -) - - -_CLUSTEROPERATIONSTATUS_STATE = _descriptor.EnumDescriptor( - name="State", - full_name="google.cloud.dataproc.v1beta2.ClusterOperationStatus.State", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="UNKNOWN", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="PENDING", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="RUNNING", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="DONE", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=345, - serialized_end=401, -) -_sym_db.RegisterEnumDescriptor(_CLUSTEROPERATIONSTATUS_STATE) - - -_CLUSTEROPERATIONSTATUS = _descriptor.Descriptor( - name="ClusterOperationStatus", - full_name="google.cloud.dataproc.v1beta2.ClusterOperationStatus", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="state", - full_name="google.cloud.dataproc.v1beta2.ClusterOperationStatus.state", - index=0, - number=1, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="inner_state", - full_name="google.cloud.dataproc.v1beta2.ClusterOperationStatus.inner_state", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="details", - full_name="google.cloud.dataproc.v1beta2.ClusterOperationStatus.details", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="state_start_time", - full_name="google.cloud.dataproc.v1beta2.ClusterOperationStatus.state_start_time", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_CLUSTEROPERATIONSTATUS_STATE,], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=151, - serialized_end=401, -) - - 
-_CLUSTEROPERATIONMETADATA_LABELSENTRY = _descriptor.Descriptor( - name="LabelsEntry", - full_name="google.cloud.dataproc.v1beta2.ClusterOperationMetadata.LabelsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1beta2.ClusterOperationMetadata.LabelsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1beta2.ClusterOperationMetadata.LabelsEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=774, - serialized_end=819, -) - -_CLUSTEROPERATIONMETADATA = _descriptor.Descriptor( - name="ClusterOperationMetadata", - full_name="google.cloud.dataproc.v1beta2.ClusterOperationMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="cluster_name", - full_name="google.cloud.dataproc.v1beta2.ClusterOperationMetadata.cluster_name", - index=0, - number=7, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_uuid", - full_name="google.cloud.dataproc.v1beta2.ClusterOperationMetadata.cluster_uuid", - index=1, - number=8, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="status", - full_name="google.cloud.dataproc.v1beta2.ClusterOperationMetadata.status", - index=2, - number=9, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="status_history", - full_name="google.cloud.dataproc.v1beta2.ClusterOperationMetadata.status_history", - index=3, - number=10, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - 
_descriptor.FieldDescriptor( - name="operation_type", - full_name="google.cloud.dataproc.v1beta2.ClusterOperationMetadata.operation_type", - index=4, - number=11, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="description", - full_name="google.cloud.dataproc.v1beta2.ClusterOperationMetadata.description", - index=5, - number=12, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="labels", - full_name="google.cloud.dataproc.v1beta2.ClusterOperationMetadata.labels", - index=6, - number=13, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="warnings", - full_name="google.cloud.dataproc.v1beta2.ClusterOperationMetadata.warnings", - index=7, - number=14, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_CLUSTEROPERATIONMETADATA_LABELSENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=404, - serialized_end=819, -) - -_CLUSTEROPERATIONSTATUS.fields_by_name[ - "state" -].enum_type = _CLUSTEROPERATIONSTATUS_STATE -_CLUSTEROPERATIONSTATUS.fields_by_name[ - "state_start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_CLUSTEROPERATIONSTATUS_STATE.containing_type = _CLUSTEROPERATIONSTATUS -_CLUSTEROPERATIONMETADATA_LABELSENTRY.containing_type = _CLUSTEROPERATIONMETADATA -_CLUSTEROPERATIONMETADATA.fields_by_name[ - "status" -].message_type = _CLUSTEROPERATIONSTATUS -_CLUSTEROPERATIONMETADATA.fields_by_name[ - "status_history" -].message_type = _CLUSTEROPERATIONSTATUS -_CLUSTEROPERATIONMETADATA.fields_by_name[ - "labels" -].message_type = _CLUSTEROPERATIONMETADATA_LABELSENTRY -DESCRIPTOR.message_types_by_name["ClusterOperationStatus"] = _CLUSTEROPERATIONSTATUS -DESCRIPTOR.message_types_by_name["ClusterOperationMetadata"] = _CLUSTEROPERATIONMETADATA -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -ClusterOperationStatus = _reflection.GeneratedProtocolMessageType( - "ClusterOperationStatus", - (_message.Message,), - { - "DESCRIPTOR": _CLUSTEROPERATIONSTATUS, - "__module__": "google.cloud.dataproc_v1beta2.proto.operations_pb2", - "__doc__": """The status of the operation. - - Attributes: - state: - Output only. A message containing the operation state. - inner_state: - Output only. A message containing the detailed operation - state. - details: - Output only. A message containing any operation metadata - details. - state_start_time: - Output only. 
The time this state was entered. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ClusterOperationStatus) - }, -) -_sym_db.RegisterMessage(ClusterOperationStatus) - -ClusterOperationMetadata = _reflection.GeneratedProtocolMessageType( - "ClusterOperationMetadata", - (_message.Message,), - { - "LabelsEntry": _reflection.GeneratedProtocolMessageType( - "LabelsEntry", - (_message.Message,), - { - "DESCRIPTOR": _CLUSTEROPERATIONMETADATA_LABELSENTRY, - "__module__": "google.cloud.dataproc_v1beta2.proto.operations_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ClusterOperationMetadata.LabelsEntry) - }, - ), - "DESCRIPTOR": _CLUSTEROPERATIONMETADATA, - "__module__": "google.cloud.dataproc_v1beta2.proto.operations_pb2", - "__doc__": """Metadata describing the operation. - - Attributes: - cluster_name: - Output only. Name of the cluster for the operation. - cluster_uuid: - Output only. Cluster UUID for the operation. - status: - Output only. Current operation status. - status_history: - Output only. The previous operation status. - operation_type: - Output only. The operation type. - description: - Output only. Short description of operation. - labels: - Output only. Labels associated with the operation - warnings: - Output only. Errors encountered during operation execution. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ClusterOperationMetadata) - }, -) -_sym_db.RegisterMessage(ClusterOperationMetadata) -_sym_db.RegisterMessage(ClusterOperationMetadata.LabelsEntry) - - -DESCRIPTOR._options = None -_CLUSTEROPERATIONMETADATA_LABELSENTRY._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/dataproc_v1beta2/proto/operations_pb2_grpc.py b/google/cloud/dataproc_v1beta2/proto/operations_pb2_grpc.py deleted file mode 100644 index 8a939394..00000000 --- a/google/cloud/dataproc_v1beta2/proto/operations_pb2_grpc.py +++ /dev/null @@ -1,3 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc diff --git a/google/cloud/dataproc_v1beta2/proto/shared_pb2.py b/google/cloud/dataproc_v1beta2/proto/shared_pb2.py deleted file mode 100644 index 136a7be9..00000000 --- a/google/cloud/dataproc_v1beta2/proto/shared_pb2.py +++ /dev/null @@ -1,169 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/dataproc_v1beta2/proto/shared.proto - -from google.protobuf.internal import enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/dataproc_v1beta2/proto/shared.proto", - package="google.cloud.dataproc.v1beta2", - syntax="proto3", - serialized_options=b"\n!com.google.cloud.dataproc.v1beta2B\013SharedProtoP\001ZEgoogle.golang.org/genproto/googleapis/cloud/dataproc/v1beta2;dataproc", - create_key=_descriptor._internal_create_key, - serialized_pb=b"\n0google/cloud/dataproc_v1beta2/proto/shared.proto\x12\x1dgoogle.cloud.dataproc.v1beta2\x1a\x1cgoogle/api/annotations.proto*\xc2\x01\n\tComponent\x12\x19\n\x15\x43OMPONENT_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x41NACONDA\x10\x05\x12\n\n\x06\x44OCKER\x10\r\x12\t\n\x05\x44RUID\x10\t\x12\t\n\x05\x46LINK\x10\x0e\x12\x10\n\x0cHIVE_WEBHCAT\x10\x03\x12\x0b\n\x07JUPYTER\x10\x01\x12\x0c\n\x08KERBEROS\x10\x07\x12\n\n\x06PRESTO\x10\x06\x12\n\n\x06RANGER\x10\x0c\x12\x08\n\x04SOLR\x10\n\x12\x0c\n\x08ZEPPELIN\x10\x04\x12\r\n\tZOOKEEPER\x10\x08\x42y\n!com.google.cloud.dataproc.v1beta2B\x0bSharedProtoP\x01ZEgoogle.golang.org/genproto/googleapis/cloud/dataproc/v1beta2;dataprocb\x06proto3", - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,], -) - -_COMPONENT = _descriptor.EnumDescriptor( - name="Component", - full_name="google.cloud.dataproc.v1beta2.Component", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="COMPONENT_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="ANACONDA", - index=1, - number=5, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="DOCKER", - index=2, - number=13, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="DRUID", - index=3, - number=9, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="FLINK", - index=4, - number=14, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="HIVE_WEBHCAT", - index=5, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="JUPYTER", - index=6, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="KERBEROS", - index=7, - number=7, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="PRESTO", - index=8, - number=6, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="RANGER", - index=9, - number=12, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - 
_descriptor.EnumValueDescriptor( - name="SOLR", - index=10, - number=10, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="ZEPPELIN", - index=11, - number=4, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="ZOOKEEPER", - index=12, - number=8, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=114, - serialized_end=308, -) -_sym_db.RegisterEnumDescriptor(_COMPONENT) - -Component = enum_type_wrapper.EnumTypeWrapper(_COMPONENT) -COMPONENT_UNSPECIFIED = 0 -ANACONDA = 5 -DOCKER = 13 -DRUID = 9 -FLINK = 14 -HIVE_WEBHCAT = 3 -JUPYTER = 1 -KERBEROS = 7 -PRESTO = 6 -RANGER = 12 -SOLR = 10 -ZEPPELIN = 4 -ZOOKEEPER = 8 - - -DESCRIPTOR.enum_types_by_name["Component"] = _COMPONENT -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - -DESCRIPTOR._options = None -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/dataproc_v1beta2/proto/shared_pb2_grpc.py b/google/cloud/dataproc_v1beta2/proto/shared_pb2_grpc.py deleted file mode 100644 index 8a939394..00000000 --- a/google/cloud/dataproc_v1beta2/proto/shared_pb2_grpc.py +++ /dev/null @@ -1,3 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc diff --git a/google/cloud/dataproc_v1beta2/proto/workflow_templates_pb2.py b/google/cloud/dataproc_v1beta2/proto/workflow_templates_pb2.py deleted file mode 100644 index 4f61fcb6..00000000 --- a/google/cloud/dataproc_v1beta2/proto/workflow_templates_pb2.py +++ /dev/null @@ -1,3551 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/cloud/dataproc_v1beta2/proto/workflow_templates.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database - -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.api import client_pb2 as google_dot_api_dot_client__pb2 -from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 -from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 -from google.cloud.dataproc_v1beta2.proto import ( - clusters_pb2 as google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2, -) -from google.cloud.dataproc_v1beta2.proto import ( - jobs_pb2 as google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2, -) -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name="google/cloud/dataproc_v1beta2/proto/workflow_templates.proto", - package="google.cloud.dataproc.v1beta2", - syntax="proto3", - serialized_options=b"\n!com.google.cloud.dataproc.v1beta2B\026WorkflowTemplatesProtoP\001ZEgoogle.golang.org/genproto/googleapis/cloud/dataproc/v1beta2;dataproc", - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n\n\nhadoop_job\x18\x02 \x01(\x0b\x32(.google.cloud.dataproc.v1beta2.HadoopJobH\x00\x12<\n\tspark_job\x18\x03 \x01(\x0b\x32\'.google.cloud.dataproc.v1beta2.SparkJobH\x00\x12@\n\x0bpyspark_job\x18\x04 \x01(\x0b\x32).google.cloud.dataproc.v1beta2.PySparkJobH\x00\x12:\n\x08hive_job\x18\x05 \x01(\x0b\x32&.google.cloud.dataproc.v1beta2.HiveJobH\x00\x12\x38\n\x07pig_job\x18\x06 \x01(\x0b\x32%.google.cloud.dataproc.v1beta2.PigJobH\x00\x12?\n\x0bspark_r_job\x18\x0b \x01(\x0b\x32(.google.cloud.dataproc.v1beta2.SparkRJobH\x00\x12\x43\n\rspark_sql_job\x18\x07 \x01(\x0b\x32*.google.cloud.dataproc.v1beta2.SparkSqlJobH\x00\x12>\n\npresto_job\x18\x0c \x01(\x0b\x32(.google.cloud.dataproc.v1beta2.PrestoJobH\x00\x12J\n\x06labels\x18\x08 \x03(\x0b\x32\x35.google.cloud.dataproc.v1beta2.OrderedJob.LabelsEntryB\x03\xe0\x41\x01\x12\x45\n\nscheduling\x18\t \x01(\x0b\x32,.google.cloud.dataproc.v1beta2.JobSchedulingB\x03\xe0\x41\x01\x12"\n\x15prerequisite_step_ids\x18\n \x03(\tB\x03\xe0\x41\x01\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\n\n\x08job_type"\x8e\x01\n\x11TemplateParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06\x66ields\x18\x02 \x03(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12\x46\n\nvalidation\x18\x04 \x01(\x0b\x32\x32.google.cloud.dataproc.v1beta2.ParameterValidation"\xab\x01\n\x13ParameterValidation\x12?\n\x05regex\x18\x01 \x01(\x0b\x32..google.cloud.dataproc.v1beta2.RegexValidationH\x00\x12@\n\x06values\x18\x02 \x01(\x0b\x32..google.cloud.dataproc.v1beta2.ValueValidationH\x00\x42\x11\n\x0fvalidation_type""\n\x0fRegexValidation\x12\x0f\n\x07regexes\x18\x01 \x03(\t"!\n\x0fValueValidation\x12\x0e\n\x06values\x18\x01 \x03(\t"\xc8\x05\n\x10WorkflowMetadata\x12\x15\n\x08template\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x14\n\x07version\x18\x02 \x01(\x05\x42\x03\xe0\x41\x03\x12L\n\x0e\x63reate_cluster\x18\x03 
\x01(\x0b\x32/.google.cloud.dataproc.v1beta2.ClusterOperationB\x03\xe0\x41\x03\x12@\n\x05graph\x18\x04 \x01(\x0b\x32,.google.cloud.dataproc.v1beta2.WorkflowGraphB\x03\xe0\x41\x03\x12L\n\x0e\x64\x65lete_cluster\x18\x05 \x01(\x0b\x32/.google.cloud.dataproc.v1beta2.ClusterOperationB\x03\xe0\x41\x03\x12I\n\x05state\x18\x06 \x01(\x0e\x32\x35.google.cloud.dataproc.v1beta2.WorkflowMetadata.StateB\x03\xe0\x41\x03\x12\x19\n\x0c\x63luster_name\x18\x07 \x01(\tB\x03\xe0\x41\x03\x12S\n\nparameters\x18\x08 \x03(\x0b\x32?.google.cloud.dataproc.v1beta2.WorkflowMetadata.ParametersEntry\x12\x33\n\nstart_time\x18\t \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x31\n\x08\x65nd_time\x18\n \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x19\n\x0c\x63luster_uuid\x18\x0b \x01(\tB\x03\xe0\x41\x03\x1a\x31\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"8\n\x05State\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0b\n\x07PENDING\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\x08\n\x04\x44ONE\x10\x03"T\n\x10\x43lusterOperation\x12\x19\n\x0coperation_id\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12\x12\n\x05\x65rror\x18\x02 \x01(\tB\x03\xe0\x41\x03\x12\x11\n\x04\x64one\x18\x03 \x01(\x08\x42\x03\xe0\x41\x03"P\n\rWorkflowGraph\x12?\n\x05nodes\x18\x01 \x03(\x0b\x32+.google.cloud.dataproc.v1beta2.WorkflowNodeB\x03\xe0\x41\x03"\xa9\x02\n\x0cWorkflowNode\x12\x14\n\x07step_id\x18\x01 \x01(\tB\x03\xe0\x41\x03\x12"\n\x15prerequisite_step_ids\x18\x02 \x03(\tB\x03\xe0\x41\x03\x12\x13\n\x06job_id\x18\x03 \x01(\tB\x03\xe0\x41\x03\x12I\n\x05state\x18\x05 \x01(\x0e\x32\x35.google.cloud.dataproc.v1beta2.WorkflowNode.NodeStateB\x03\xe0\x41\x03\x12\x12\n\x05\x65rror\x18\x06 \x01(\tB\x03\xe0\x41\x03"k\n\tNodeState\x12\x1b\n\x17NODE_STATUS_UNSPECIFIED\x10\x00\x12\x0b\n\x07\x42LOCKED\x10\x01\x12\x0c\n\x08RUNNABLE\x10\x02\x12\x0b\n\x07RUNNING\x10\x03\x12\r\n\tCOMPLETED\x10\x04\x12\n\n\x06\x46\x41ILED\x10\x05"\xa9\x01\n\x1d\x43reateWorkflowTemplateRequest\x12@\n\x06parent\x18\x01 \x01(\tB0\xe0\x41\x02\xfa\x41*\x12(dataproc.googleapis.com/WorkflowTemplate\x12\x46\n\x08template\x18\x02 \x01(\x0b\x32/.google.cloud.dataproc.v1beta2.WorkflowTemplateB\x03\xe0\x41\x02"m\n\x1aGetWorkflowTemplateRequest\x12>\n\x04name\x18\x01 \x01(\tB0\xe0\x41\x02\xfa\x41*\n(dataproc.googleapis.com/WorkflowTemplate\x12\x0f\n\x07version\x18\x02 \x01(\x05"\xbc\x02\n"InstantiateWorkflowTemplateRequest\x12>\n\x04name\x18\x01 \x01(\tB0\xe0\x41\x02\xfa\x41*\n(dataproc.googleapis.com/WorkflowTemplate\x12\x0f\n\x07version\x18\x02 \x01(\x05\x12\x17\n\x0binstance_id\x18\x03 \x01(\tB\x02\x18\x01\x12\x12\n\nrequest_id\x18\x05 \x01(\t\x12\x65\n\nparameters\x18\x04 \x03(\x0b\x32Q.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest.ParametersEntry\x1a\x31\n\x0fParametersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\xdd\x01\n(InstantiateInlineWorkflowTemplateRequest\x12@\n\x06parent\x18\x01 \x01(\tB0\xe0\x41\x02\xfa\x41*\x12(dataproc.googleapis.com/WorkflowTemplate\x12\x46\n\x08template\x18\x02 \x01(\x0b\x32/.google.cloud.dataproc.v1beta2.WorkflowTemplateB\x03\xe0\x41\x02\x12\x13\n\x0binstance_id\x18\x03 \x01(\t\x12\x12\n\nrequest_id\x18\x04 \x01(\t"g\n\x1dUpdateWorkflowTemplateRequest\x12\x46\n\x08template\x18\x01 \x01(\x0b\x32/.google.cloud.dataproc.v1beta2.WorkflowTemplateB\x03\xe0\x41\x02"\x87\x01\n\x1cListWorkflowTemplatesRequest\x12@\n\x06parent\x18\x01 \x01(\tB0\xe0\x41\x02\xfa\x41*\x12(dataproc.googleapis.com/WorkflowTemplate\x12\x11\n\tpage_size\x18\x02 
\x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t"\x86\x01\n\x1dListWorkflowTemplatesResponse\x12G\n\ttemplates\x18\x01 \x03(\x0b\x32/.google.cloud.dataproc.v1beta2.WorkflowTemplateB\x03\xe0\x41\x03\x12\x1c\n\x0fnext_page_token\x18\x02 \x01(\tB\x03\xe0\x41\x03"p\n\x1d\x44\x65leteWorkflowTemplateRequest\x12>\n\x04name\x18\x01 \x01(\tB0\xe0\x41\x02\xfa\x41*\n(dataproc.googleapis.com/WorkflowTemplate\x12\x0f\n\x07version\x18\x02 \x01(\x05\x32\xe9\x11\n\x17WorkflowTemplateService\x12\xb0\x02\n\x16\x43reateWorkflowTemplate\x12<.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest\x1a/.google.cloud.dataproc.v1beta2.WorkflowTemplate"\xa6\x01\x82\xd3\xe4\x93\x02\x8c\x01"8/v1beta2/{parent=projects/*/regions/*}/workflowTemplates:\x08templateZF":/v1beta2/{parent=projects/*/locations/*}/workflowTemplates:\x08template\xda\x41\x10parent, template\x12\x89\x02\n\x13GetWorkflowTemplate\x12\x39.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest\x1a/.google.cloud.dataproc.v1beta2.WorkflowTemplate"\x85\x01\x82\xd3\xe4\x93\x02x\x12\x38/v1beta2/{name=projects/*/regions/*/workflowTemplates/*}Z<\x12:/v1beta2/{name=projects/*/locations/*/workflowTemplates/*}\xda\x41\x04name\x12\xe5\x02\n\x1bInstantiateWorkflowTemplate\x12\x41.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest\x1a\x1d.google.longrunning.Operation"\xe3\x01\x82\xd3\xe4\x93\x02\x96\x01"D/v1beta2/{name=projects/*/regions/*/workflowTemplates/*}:instantiate:\x01*ZK"F/v1beta2/{name=projects/*/locations/*/workflowTemplates/*}:instantiate:\x01*\xda\x41\x04name\xda\x41\x10name, parameters\xca\x41)\n\x15google.protobuf.Empty\x12\x10WorkflowMetadata\x12\x84\x03\n!InstantiateInlineWorkflowTemplate\x12G.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest\x1a\x1d.google.longrunning.Operation"\xf6\x01\x82\xd3\xe4\x93\x02\xb0\x01"L/v1beta2/{parent=projects/*/locations/*}/workflowTemplates:instantiateInline:\x08templateZV"J/v1beta2/{parent=projects/*/regions/*}/workflowTemplates:instantiateInline:\x08template\xda\x41\x10parent, template\xca\x41)\n\x15google.protobuf.Empty\x12\x10WorkflowMetadata\x12\xba\x02\n\x16UpdateWorkflowTemplate\x12<.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest\x1a/.google.cloud.dataproc.v1beta2.WorkflowTemplate"\xb0\x01\x82\xd3\xe4\x93\x02\x9e\x01\x1a\x41/v1beta2/{template.name=projects/*/regions/*/workflowTemplates/*}:\x08templateZO\x1a\x43/v1beta2/{template.name=projects/*/locations/*/workflowTemplates/*}:\x08template\xda\x41\x08template\x12\x9c\x02\n\x15ListWorkflowTemplates\x12;.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest\x1a<.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse"\x87\x01\x82\xd3\xe4\x93\x02x\x12\x38/v1beta2/{parent=projects/*/regions/*}/workflowTemplatesZ<\x12:/v1beta2/{parent=projects/*/locations/*}/workflowTemplates\xda\x41\x06parent\x12\xf6\x01\n\x16\x44\x65leteWorkflowTemplate\x12<.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest\x1a\x16.google.protobuf.Empty"\x85\x01\x82\xd3\xe4\x93\x02x*8/v1beta2/{name=projects/*/regions/*/workflowTemplates/*}Z<*:/v1beta2/{name=projects/*/locations/*/workflowTemplates/*}\xda\x41\x04name\x1aK\xca\x41\x17\x64\x61taproc.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\x84\x01\n!com.google.cloud.dataproc.v1beta2B\x16WorkflowTemplatesProtoP\x01ZEgoogle.golang.org/genproto/googleapis/cloud/dataproc/v1beta2;dataprocb\x06proto3', - dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, - 
google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, - google_dot_api_dot_resource__pb2.DESCRIPTOR, - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2.DESCRIPTOR, - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2.DESCRIPTOR, - google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, - google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, - google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, - ], -) - - -_WORKFLOWMETADATA_STATE = _descriptor.EnumDescriptor( - name="State", - full_name="google.cloud.dataproc.v1beta2.WorkflowMetadata.State", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="UNKNOWN", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="PENDING", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="RUNNING", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="DONE", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=3570, - serialized_end=3626, -) -_sym_db.RegisterEnumDescriptor(_WORKFLOWMETADATA_STATE) - -_WORKFLOWNODE_NODESTATE = _descriptor.EnumDescriptor( - name="NodeState", - full_name="google.cloud.dataproc.v1beta2.WorkflowNode.NodeState", - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name="NODE_STATUS_UNSPECIFIED", - index=0, - number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="BLOCKED", - index=1, - number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="RUNNABLE", - index=2, - number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="RUNNING", - index=3, - number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="COMPLETED", - index=4, - number=4, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - _descriptor.EnumValueDescriptor( - name="FAILED", - index=5, - number=5, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key, - ), - ], - containing_type=None, - serialized_options=None, - serialized_start=3987, - serialized_end=4094, -) -_sym_db.RegisterEnumDescriptor(_WORKFLOWNODE_NODESTATE) - - -_WORKFLOWTEMPLATE_LABELSENTRY = _descriptor.Descriptor( - name="LabelsEntry", - full_name="google.cloud.dataproc.v1beta2.WorkflowTemplate.LabelsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1beta2.WorkflowTemplate.LabelsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - 
serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1beta2.WorkflowTemplate.LabelsEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=887, - serialized_end=932, -) - -_WORKFLOWTEMPLATE = _descriptor.Descriptor( - name="WorkflowTemplate", - full_name="google.cloud.dataproc.v1beta2.WorkflowTemplate", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="id", - full_name="google.cloud.dataproc.v1beta2.WorkflowTemplate.id", - index=0, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="name", - full_name="google.cloud.dataproc.v1beta2.WorkflowTemplate.name", - index=1, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="version", - full_name="google.cloud.dataproc.v1beta2.WorkflowTemplate.version", - index=2, - number=3, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="create_time", - full_name="google.cloud.dataproc.v1beta2.WorkflowTemplate.create_time", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="update_time", - full_name="google.cloud.dataproc.v1beta2.WorkflowTemplate.update_time", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="labels", - full_name="google.cloud.dataproc.v1beta2.WorkflowTemplate.labels", - index=5, - number=6, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="placement", - full_name="google.cloud.dataproc.v1beta2.WorkflowTemplate.placement", - index=6, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="jobs", - full_name="google.cloud.dataproc.v1beta2.WorkflowTemplate.jobs", - index=7, - number=8, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="parameters", - full_name="google.cloud.dataproc.v1beta2.WorkflowTemplate.parameters", - index=8, - number=9, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_WORKFLOWTEMPLATE_LABELSENTRY,], - enum_types=[], - serialized_options=b"\352A\306\001\n(dataproc.googleapis.com/WorkflowTemplate\022Iprojects/{project}/regions/{region}/workflowTemplates/{workflow_template}\022Mprojects/{project}/locations/{location}/workflowTemplates/{workflow_template} \001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=410, - serialized_end=1137, -) - - -_WORKFLOWTEMPLATEPLACEMENT = _descriptor.Descriptor( - name="WorkflowTemplatePlacement", - full_name="google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="managed_cluster", - full_name="google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement.managed_cluster", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_selector", - full_name="google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement.cluster_selector", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="placement", - full_name="google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement.placement", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=1140, 
- serialized_end=1330, -) - - -_MANAGEDCLUSTER_LABELSENTRY = _descriptor.Descriptor( - name="LabelsEntry", - full_name="google.cloud.dataproc.v1beta2.ManagedCluster.LabelsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1beta2.ManagedCluster.LabelsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1beta2.ManagedCluster.LabelsEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=887, - serialized_end=932, -) - -_MANAGEDCLUSTER = _descriptor.Descriptor( - name="ManagedCluster", - full_name="google.cloud.dataproc.v1beta2.ManagedCluster", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="cluster_name", - full_name="google.cloud.dataproc.v1beta2.ManagedCluster.cluster_name", - index=0, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="config", - full_name="google.cloud.dataproc.v1beta2.ManagedCluster.config", - index=1, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="labels", - full_name="google.cloud.dataproc.v1beta2.ManagedCluster.labels", - index=2, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_MANAGEDCLUSTER_LABELSENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1333, - serialized_end=1555, -) - - -_CLUSTERSELECTOR_CLUSTERLABELSENTRY = _descriptor.Descriptor( - name="ClusterLabelsEntry", - full_name="google.cloud.dataproc.v1beta2.ClusterSelector.ClusterLabelsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - 
name="key", - full_name="google.cloud.dataproc.v1beta2.ClusterSelector.ClusterLabelsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1beta2.ClusterSelector.ClusterLabelsEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1682, - serialized_end=1734, -) - -_CLUSTERSELECTOR = _descriptor.Descriptor( - name="ClusterSelector", - full_name="google.cloud.dataproc.v1beta2.ClusterSelector", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="zone", - full_name="google.cloud.dataproc.v1beta2.ClusterSelector.zone", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_labels", - full_name="google.cloud.dataproc.v1beta2.ClusterSelector.cluster_labels", - index=1, - number=2, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_CLUSTERSELECTOR_CLUSTERLABELSENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=1558, - serialized_end=1734, -) - - -_ORDEREDJOB_LABELSENTRY = _descriptor.Descriptor( - name="LabelsEntry", - full_name="google.cloud.dataproc.v1beta2.OrderedJob.LabelsEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1beta2.OrderedJob.LabelsEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1beta2.OrderedJob.LabelsEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, 
- serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=887, - serialized_end=932, -) - -_ORDEREDJOB = _descriptor.Descriptor( - name="OrderedJob", - full_name="google.cloud.dataproc.v1beta2.OrderedJob", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="step_id", - full_name="google.cloud.dataproc.v1beta2.OrderedJob.step_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="hadoop_job", - full_name="google.cloud.dataproc.v1beta2.OrderedJob.hadoop_job", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="spark_job", - full_name="google.cloud.dataproc.v1beta2.OrderedJob.spark_job", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="pyspark_job", - full_name="google.cloud.dataproc.v1beta2.OrderedJob.pyspark_job", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="hive_job", - full_name="google.cloud.dataproc.v1beta2.OrderedJob.hive_job", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="pig_job", - full_name="google.cloud.dataproc.v1beta2.OrderedJob.pig_job", - index=5, - number=6, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="spark_r_job", - full_name="google.cloud.dataproc.v1beta2.OrderedJob.spark_r_job", - index=6, - number=11, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - 
create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="spark_sql_job", - full_name="google.cloud.dataproc.v1beta2.OrderedJob.spark_sql_job", - index=7, - number=7, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="presto_job", - full_name="google.cloud.dataproc.v1beta2.OrderedJob.presto_job", - index=8, - number=12, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="labels", - full_name="google.cloud.dataproc.v1beta2.OrderedJob.labels", - index=9, - number=8, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="scheduling", - full_name="google.cloud.dataproc.v1beta2.OrderedJob.scheduling", - index=10, - number=9, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="prerequisite_step_ids", - full_name="google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids", - index=11, - number=10, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_ORDEREDJOB_LABELSENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="job_type", - full_name="google.cloud.dataproc.v1beta2.OrderedJob.job_type", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=1737, - serialized_end=2521, -) - - -_TEMPLATEPARAMETER = _descriptor.Descriptor( - name="TemplateParameter", - full_name="google.cloud.dataproc.v1beta2.TemplateParameter", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.cloud.dataproc.v1beta2.TemplateParameter.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="fields", - full_name="google.cloud.dataproc.v1beta2.TemplateParameter.fields", - index=1, 
- number=2, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="description", - full_name="google.cloud.dataproc.v1beta2.TemplateParameter.description", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="validation", - full_name="google.cloud.dataproc.v1beta2.TemplateParameter.validation", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2524, - serialized_end=2666, -) - - -_PARAMETERVALIDATION = _descriptor.Descriptor( - name="ParameterValidation", - full_name="google.cloud.dataproc.v1beta2.ParameterValidation", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="regex", - full_name="google.cloud.dataproc.v1beta2.ParameterValidation.regex", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="values", - full_name="google.cloud.dataproc.v1beta2.ParameterValidation.values", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name="validation_type", - full_name="google.cloud.dataproc.v1beta2.ParameterValidation.validation_type", - index=0, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[], - ), - ], - serialized_start=2669, - serialized_end=2840, -) - - -_REGEXVALIDATION = _descriptor.Descriptor( - name="RegexValidation", - full_name="google.cloud.dataproc.v1beta2.RegexValidation", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="regexes", - full_name="google.cloud.dataproc.v1beta2.RegexValidation.regexes", - index=0, - number=1, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2842, - serialized_end=2876, -) - - -_VALUEVALIDATION = _descriptor.Descriptor( - name="ValueValidation", - full_name="google.cloud.dataproc.v1beta2.ValueValidation", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="values", - full_name="google.cloud.dataproc.v1beta2.ValueValidation.values", - index=0, - number=1, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2878, - serialized_end=2911, -) - - -_WORKFLOWMETADATA_PARAMETERSENTRY = _descriptor.Descriptor( - name="ParametersEntry", - full_name="google.cloud.dataproc.v1beta2.WorkflowMetadata.ParametersEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1beta2.WorkflowMetadata.ParametersEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1beta2.WorkflowMetadata.ParametersEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3519, - serialized_end=3568, -) - -_WORKFLOWMETADATA = _descriptor.Descriptor( - name="WorkflowMetadata", - full_name="google.cloud.dataproc.v1beta2.WorkflowMetadata", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="template", - full_name="google.cloud.dataproc.v1beta2.WorkflowMetadata.template", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="version", - full_name="google.cloud.dataproc.v1beta2.WorkflowMetadata.version", - index=1, - number=2, - type=5, - cpp_type=1, - 
label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="create_cluster", - full_name="google.cloud.dataproc.v1beta2.WorkflowMetadata.create_cluster", - index=2, - number=3, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="graph", - full_name="google.cloud.dataproc.v1beta2.WorkflowMetadata.graph", - index=3, - number=4, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="delete_cluster", - full_name="google.cloud.dataproc.v1beta2.WorkflowMetadata.delete_cluster", - index=4, - number=5, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="state", - full_name="google.cloud.dataproc.v1beta2.WorkflowMetadata.state", - index=5, - number=6, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_name", - full_name="google.cloud.dataproc.v1beta2.WorkflowMetadata.cluster_name", - index=6, - number=7, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="parameters", - full_name="google.cloud.dataproc.v1beta2.WorkflowMetadata.parameters", - index=7, - number=8, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="start_time", - full_name="google.cloud.dataproc.v1beta2.WorkflowMetadata.start_time", - index=8, - number=9, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="end_time", - full_name="google.cloud.dataproc.v1beta2.WorkflowMetadata.end_time", - index=9, - 
number=10, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="cluster_uuid", - full_name="google.cloud.dataproc.v1beta2.WorkflowMetadata.cluster_uuid", - index=10, - number=11, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_WORKFLOWMETADATA_PARAMETERSENTRY,], - enum_types=[_WORKFLOWMETADATA_STATE,], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=2914, - serialized_end=3626, -) - - -_CLUSTEROPERATION = _descriptor.Descriptor( - name="ClusterOperation", - full_name="google.cloud.dataproc.v1beta2.ClusterOperation", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="operation_id", - full_name="google.cloud.dataproc.v1beta2.ClusterOperation.operation_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="error", - full_name="google.cloud.dataproc.v1beta2.ClusterOperation.error", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="done", - full_name="google.cloud.dataproc.v1beta2.ClusterOperation.done", - index=2, - number=3, - type=8, - cpp_type=7, - label=1, - has_default_value=False, - default_value=False, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3628, - serialized_end=3712, -) - - -_WORKFLOWGRAPH = _descriptor.Descriptor( - name="WorkflowGraph", - full_name="google.cloud.dataproc.v1beta2.WorkflowGraph", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="nodes", - full_name="google.cloud.dataproc.v1beta2.WorkflowGraph.nodes", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - 
), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3714, - serialized_end=3794, -) - - -_WORKFLOWNODE = _descriptor.Descriptor( - name="WorkflowNode", - full_name="google.cloud.dataproc.v1beta2.WorkflowNode", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="step_id", - full_name="google.cloud.dataproc.v1beta2.WorkflowNode.step_id", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="prerequisite_step_ids", - full_name="google.cloud.dataproc.v1beta2.WorkflowNode.prerequisite_step_ids", - index=1, - number=2, - type=9, - cpp_type=9, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="job_id", - full_name="google.cloud.dataproc.v1beta2.WorkflowNode.job_id", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="state", - full_name="google.cloud.dataproc.v1beta2.WorkflowNode.state", - index=3, - number=5, - type=14, - cpp_type=8, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="error", - full_name="google.cloud.dataproc.v1beta2.WorkflowNode.error", - index=4, - number=6, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[_WORKFLOWNODE_NODESTATE,], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3797, - serialized_end=4094, -) - - -_CREATEWORKFLOWTEMPLATEREQUEST = _descriptor.Descriptor( - name="CreateWorkflowTemplateRequest", - full_name="google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - full_name="google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - 
is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A*\022(dataproc.googleapis.com/WorkflowTemplate", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="template", - full_name="google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest.template", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4097, - serialized_end=4266, -) - - -_GETWORKFLOWTEMPLATEREQUEST = _descriptor.Descriptor( - name="GetWorkflowTemplateRequest", - full_name="google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A*\n(dataproc.googleapis.com/WorkflowTemplate", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="version", - full_name="google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest.version", - index=1, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4268, - serialized_end=4377, -) - - -_INSTANTIATEWORKFLOWTEMPLATEREQUEST_PARAMETERSENTRY = _descriptor.Descriptor( - name="ParametersEntry", - full_name="google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest.ParametersEntry", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="key", - full_name="google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest.ParametersEntry.key", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="value", - full_name="google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest.ParametersEntry.value", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - 
create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=b"8\001", - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=3519, - serialized_end=3568, -) - -_INSTANTIATEWORKFLOWTEMPLATEREQUEST = _descriptor.Descriptor( - name="InstantiateWorkflowTemplateRequest", - full_name="google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A*\n(dataproc.googleapis.com/WorkflowTemplate", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="version", - full_name="google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest.version", - index=1, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="instance_id", - full_name="google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest.instance_id", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\030\001", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="request_id", - full_name="google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest.request_id", - index=3, - number=5, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="parameters", - full_name="google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest.parameters", - index=4, - number=4, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[_INSTANTIATEWORKFLOWTEMPLATEREQUEST_PARAMETERSENTRY,], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4380, - serialized_end=4696, -) - - -_INSTANTIATEINLINEWORKFLOWTEMPLATEREQUEST = _descriptor.Descriptor( - name="InstantiateInlineWorkflowTemplateRequest", - full_name="google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - 
_descriptor.FieldDescriptor( - name="parent", - full_name="google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A*\022(dataproc.googleapis.com/WorkflowTemplate", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="template", - full_name="google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest.template", - index=1, - number=2, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="instance_id", - full_name="google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest.instance_id", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="request_id", - full_name="google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest.request_id", - index=3, - number=4, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4699, - serialized_end=4920, -) - - -_UPDATEWORKFLOWTEMPLATEREQUEST = _descriptor.Descriptor( - name="UpdateWorkflowTemplateRequest", - full_name="google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="template", - full_name="google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest.template", - index=0, - number=1, - type=11, - cpp_type=10, - label=1, - has_default_value=False, - default_value=None, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=4922, - serialized_end=5025, -) - - -_LISTWORKFLOWTEMPLATESREQUEST = _descriptor.Descriptor( - name="ListWorkflowTemplatesRequest", - full_name="google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="parent", - 
full_name="google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest.parent", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A*\022(dataproc.googleapis.com/WorkflowTemplate", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_size", - full_name="google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest.page_size", - index=1, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="page_token", - full_name="google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest.page_token", - index=2, - number=3, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=5028, - serialized_end=5163, -) - - -_LISTWORKFLOWTEMPLATESRESPONSE = _descriptor.Descriptor( - name="ListWorkflowTemplatesResponse", - full_name="google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="templates", - full_name="google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse.templates", - index=0, - number=1, - type=11, - cpp_type=10, - label=3, - has_default_value=False, - default_value=[], - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="next_page_token", - full_name="google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse.next_page_token", - index=1, - number=2, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\003", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=5166, - serialized_end=5300, -) - - -_DELETEWORKFLOWTEMPLATEREQUEST = _descriptor.Descriptor( - name="DeleteWorkflowTemplateRequest", - full_name="google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest", - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name="name", - full_name="google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest.name", - index=0, - number=1, - type=9, - cpp_type=9, - label=1, - has_default_value=False, - 
default_value=b"".decode("utf-8"), - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=b"\340A\002\372A*\n(dataproc.googleapis.com/WorkflowTemplate", - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - _descriptor.FieldDescriptor( - name="version", - full_name="google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest.version", - index=1, - number=2, - type=5, - cpp_type=1, - label=1, - has_default_value=False, - default_value=0, - message_type=None, - enum_type=None, - containing_type=None, - is_extension=False, - extension_scope=None, - serialized_options=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - ), - ], - extensions=[], - nested_types=[], - enum_types=[], - serialized_options=None, - is_extendable=False, - syntax="proto3", - extension_ranges=[], - oneofs=[], - serialized_start=5302, - serialized_end=5414, -) - -_WORKFLOWTEMPLATE_LABELSENTRY.containing_type = _WORKFLOWTEMPLATE -_WORKFLOWTEMPLATE.fields_by_name[ - "create_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_WORKFLOWTEMPLATE.fields_by_name[ - "update_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_WORKFLOWTEMPLATE.fields_by_name["labels"].message_type = _WORKFLOWTEMPLATE_LABELSENTRY -_WORKFLOWTEMPLATE.fields_by_name["placement"].message_type = _WORKFLOWTEMPLATEPLACEMENT -_WORKFLOWTEMPLATE.fields_by_name["jobs"].message_type = _ORDEREDJOB -_WORKFLOWTEMPLATE.fields_by_name["parameters"].message_type = _TEMPLATEPARAMETER -_WORKFLOWTEMPLATEPLACEMENT.fields_by_name[ - "managed_cluster" -].message_type = _MANAGEDCLUSTER -_WORKFLOWTEMPLATEPLACEMENT.fields_by_name[ - "cluster_selector" -].message_type = _CLUSTERSELECTOR -_WORKFLOWTEMPLATEPLACEMENT.oneofs_by_name["placement"].fields.append( - _WORKFLOWTEMPLATEPLACEMENT.fields_by_name["managed_cluster"] -) -_WORKFLOWTEMPLATEPLACEMENT.fields_by_name[ - "managed_cluster" -].containing_oneof = _WORKFLOWTEMPLATEPLACEMENT.oneofs_by_name["placement"] -_WORKFLOWTEMPLATEPLACEMENT.oneofs_by_name["placement"].fields.append( - _WORKFLOWTEMPLATEPLACEMENT.fields_by_name["cluster_selector"] -) -_WORKFLOWTEMPLATEPLACEMENT.fields_by_name[ - "cluster_selector" -].containing_oneof = _WORKFLOWTEMPLATEPLACEMENT.oneofs_by_name["placement"] -_MANAGEDCLUSTER_LABELSENTRY.containing_type = _MANAGEDCLUSTER -_MANAGEDCLUSTER.fields_by_name[ - "config" -].message_type = ( - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_clusters__pb2._CLUSTERCONFIG -) -_MANAGEDCLUSTER.fields_by_name["labels"].message_type = _MANAGEDCLUSTER_LABELSENTRY -_CLUSTERSELECTOR_CLUSTERLABELSENTRY.containing_type = _CLUSTERSELECTOR -_CLUSTERSELECTOR.fields_by_name[ - "cluster_labels" -].message_type = _CLUSTERSELECTOR_CLUSTERLABELSENTRY -_ORDEREDJOB_LABELSENTRY.containing_type = _ORDEREDJOB -_ORDEREDJOB.fields_by_name[ - "hadoop_job" -].message_type = ( - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2._HADOOPJOB -) -_ORDEREDJOB.fields_by_name[ - "spark_job" -].message_type = ( - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2._SPARKJOB -) -_ORDEREDJOB.fields_by_name[ - "pyspark_job" -].message_type = ( - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2._PYSPARKJOB -) -_ORDEREDJOB.fields_by_name[ - "hive_job" -].message_type = google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2._HIVEJOB -_ORDEREDJOB.fields_by_name[ - "pig_job" -].message_type = 
google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2._PIGJOB -_ORDEREDJOB.fields_by_name[ - "spark_r_job" -].message_type = ( - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2._SPARKRJOB -) -_ORDEREDJOB.fields_by_name[ - "spark_sql_job" -].message_type = ( - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2._SPARKSQLJOB -) -_ORDEREDJOB.fields_by_name[ - "presto_job" -].message_type = ( - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2._PRESTOJOB -) -_ORDEREDJOB.fields_by_name["labels"].message_type = _ORDEREDJOB_LABELSENTRY -_ORDEREDJOB.fields_by_name[ - "scheduling" -].message_type = ( - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_jobs__pb2._JOBSCHEDULING -) -_ORDEREDJOB.oneofs_by_name["job_type"].fields.append( - _ORDEREDJOB.fields_by_name["hadoop_job"] -) -_ORDEREDJOB.fields_by_name["hadoop_job"].containing_oneof = _ORDEREDJOB.oneofs_by_name[ - "job_type" -] -_ORDEREDJOB.oneofs_by_name["job_type"].fields.append( - _ORDEREDJOB.fields_by_name["spark_job"] -) -_ORDEREDJOB.fields_by_name["spark_job"].containing_oneof = _ORDEREDJOB.oneofs_by_name[ - "job_type" -] -_ORDEREDJOB.oneofs_by_name["job_type"].fields.append( - _ORDEREDJOB.fields_by_name["pyspark_job"] -) -_ORDEREDJOB.fields_by_name["pyspark_job"].containing_oneof = _ORDEREDJOB.oneofs_by_name[ - "job_type" -] -_ORDEREDJOB.oneofs_by_name["job_type"].fields.append( - _ORDEREDJOB.fields_by_name["hive_job"] -) -_ORDEREDJOB.fields_by_name["hive_job"].containing_oneof = _ORDEREDJOB.oneofs_by_name[ - "job_type" -] -_ORDEREDJOB.oneofs_by_name["job_type"].fields.append( - _ORDEREDJOB.fields_by_name["pig_job"] -) -_ORDEREDJOB.fields_by_name["pig_job"].containing_oneof = _ORDEREDJOB.oneofs_by_name[ - "job_type" -] -_ORDEREDJOB.oneofs_by_name["job_type"].fields.append( - _ORDEREDJOB.fields_by_name["spark_r_job"] -) -_ORDEREDJOB.fields_by_name["spark_r_job"].containing_oneof = _ORDEREDJOB.oneofs_by_name[ - "job_type" -] -_ORDEREDJOB.oneofs_by_name["job_type"].fields.append( - _ORDEREDJOB.fields_by_name["spark_sql_job"] -) -_ORDEREDJOB.fields_by_name[ - "spark_sql_job" -].containing_oneof = _ORDEREDJOB.oneofs_by_name["job_type"] -_ORDEREDJOB.oneofs_by_name["job_type"].fields.append( - _ORDEREDJOB.fields_by_name["presto_job"] -) -_ORDEREDJOB.fields_by_name["presto_job"].containing_oneof = _ORDEREDJOB.oneofs_by_name[ - "job_type" -] -_TEMPLATEPARAMETER.fields_by_name["validation"].message_type = _PARAMETERVALIDATION -_PARAMETERVALIDATION.fields_by_name["regex"].message_type = _REGEXVALIDATION -_PARAMETERVALIDATION.fields_by_name["values"].message_type = _VALUEVALIDATION -_PARAMETERVALIDATION.oneofs_by_name["validation_type"].fields.append( - _PARAMETERVALIDATION.fields_by_name["regex"] -) -_PARAMETERVALIDATION.fields_by_name[ - "regex" -].containing_oneof = _PARAMETERVALIDATION.oneofs_by_name["validation_type"] -_PARAMETERVALIDATION.oneofs_by_name["validation_type"].fields.append( - _PARAMETERVALIDATION.fields_by_name["values"] -) -_PARAMETERVALIDATION.fields_by_name[ - "values" -].containing_oneof = _PARAMETERVALIDATION.oneofs_by_name["validation_type"] -_WORKFLOWMETADATA_PARAMETERSENTRY.containing_type = _WORKFLOWMETADATA -_WORKFLOWMETADATA.fields_by_name["create_cluster"].message_type = _CLUSTEROPERATION -_WORKFLOWMETADATA.fields_by_name["graph"].message_type = _WORKFLOWGRAPH -_WORKFLOWMETADATA.fields_by_name["delete_cluster"].message_type = _CLUSTEROPERATION -_WORKFLOWMETADATA.fields_by_name["state"].enum_type = _WORKFLOWMETADATA_STATE -_WORKFLOWMETADATA.fields_by_name[ - "parameters" 
-].message_type = _WORKFLOWMETADATA_PARAMETERSENTRY -_WORKFLOWMETADATA.fields_by_name[ - "start_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_WORKFLOWMETADATA.fields_by_name[ - "end_time" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -_WORKFLOWMETADATA_STATE.containing_type = _WORKFLOWMETADATA -_WORKFLOWGRAPH.fields_by_name["nodes"].message_type = _WORKFLOWNODE -_WORKFLOWNODE.fields_by_name["state"].enum_type = _WORKFLOWNODE_NODESTATE -_WORKFLOWNODE_NODESTATE.containing_type = _WORKFLOWNODE -_CREATEWORKFLOWTEMPLATEREQUEST.fields_by_name[ - "template" -].message_type = _WORKFLOWTEMPLATE -_INSTANTIATEWORKFLOWTEMPLATEREQUEST_PARAMETERSENTRY.containing_type = ( - _INSTANTIATEWORKFLOWTEMPLATEREQUEST -) -_INSTANTIATEWORKFLOWTEMPLATEREQUEST.fields_by_name[ - "parameters" -].message_type = _INSTANTIATEWORKFLOWTEMPLATEREQUEST_PARAMETERSENTRY -_INSTANTIATEINLINEWORKFLOWTEMPLATEREQUEST.fields_by_name[ - "template" -].message_type = _WORKFLOWTEMPLATE -_UPDATEWORKFLOWTEMPLATEREQUEST.fields_by_name[ - "template" -].message_type = _WORKFLOWTEMPLATE -_LISTWORKFLOWTEMPLATESRESPONSE.fields_by_name[ - "templates" -].message_type = _WORKFLOWTEMPLATE -DESCRIPTOR.message_types_by_name["WorkflowTemplate"] = _WORKFLOWTEMPLATE -DESCRIPTOR.message_types_by_name[ - "WorkflowTemplatePlacement" -] = _WORKFLOWTEMPLATEPLACEMENT -DESCRIPTOR.message_types_by_name["ManagedCluster"] = _MANAGEDCLUSTER -DESCRIPTOR.message_types_by_name["ClusterSelector"] = _CLUSTERSELECTOR -DESCRIPTOR.message_types_by_name["OrderedJob"] = _ORDEREDJOB -DESCRIPTOR.message_types_by_name["TemplateParameter"] = _TEMPLATEPARAMETER -DESCRIPTOR.message_types_by_name["ParameterValidation"] = _PARAMETERVALIDATION -DESCRIPTOR.message_types_by_name["RegexValidation"] = _REGEXVALIDATION -DESCRIPTOR.message_types_by_name["ValueValidation"] = _VALUEVALIDATION -DESCRIPTOR.message_types_by_name["WorkflowMetadata"] = _WORKFLOWMETADATA -DESCRIPTOR.message_types_by_name["ClusterOperation"] = _CLUSTEROPERATION -DESCRIPTOR.message_types_by_name["WorkflowGraph"] = _WORKFLOWGRAPH -DESCRIPTOR.message_types_by_name["WorkflowNode"] = _WORKFLOWNODE -DESCRIPTOR.message_types_by_name[ - "CreateWorkflowTemplateRequest" -] = _CREATEWORKFLOWTEMPLATEREQUEST -DESCRIPTOR.message_types_by_name[ - "GetWorkflowTemplateRequest" -] = _GETWORKFLOWTEMPLATEREQUEST -DESCRIPTOR.message_types_by_name[ - "InstantiateWorkflowTemplateRequest" -] = _INSTANTIATEWORKFLOWTEMPLATEREQUEST -DESCRIPTOR.message_types_by_name[ - "InstantiateInlineWorkflowTemplateRequest" -] = _INSTANTIATEINLINEWORKFLOWTEMPLATEREQUEST -DESCRIPTOR.message_types_by_name[ - "UpdateWorkflowTemplateRequest" -] = _UPDATEWORKFLOWTEMPLATEREQUEST -DESCRIPTOR.message_types_by_name[ - "ListWorkflowTemplatesRequest" -] = _LISTWORKFLOWTEMPLATESREQUEST -DESCRIPTOR.message_types_by_name[ - "ListWorkflowTemplatesResponse" -] = _LISTWORKFLOWTEMPLATESRESPONSE -DESCRIPTOR.message_types_by_name[ - "DeleteWorkflowTemplateRequest" -] = _DELETEWORKFLOWTEMPLATEREQUEST -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -WorkflowTemplate = _reflection.GeneratedProtocolMessageType( - "WorkflowTemplate", - (_message.Message,), - { - "LabelsEntry": _reflection.GeneratedProtocolMessageType( - "LabelsEntry", - (_message.Message,), - { - "DESCRIPTOR": _WORKFLOWTEMPLATE_LABELSENTRY, - "__module__": "google.cloud.dataproc_v1beta2.proto.workflow_templates_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.WorkflowTemplate.LabelsEntry) - }, - ), - "DESCRIPTOR": _WORKFLOWTEMPLATE, - 
"__module__": "google.cloud.dataproc_v1beta2.proto.workflow_templates_pb2", - "__doc__": """A Dataproc workflow template resource. - - Attributes: - id: - Required. The template id. The id must contain only letters - (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). - Cannot begin or end with underscore or hyphen. Must consist of - between 3 and 50 characters. . - name: - Output only. The resource name of the workflow template, as - described in - https://cloud.google.com/apis/design/resource_names. - For - ``projects.regions.workflowTemplates``, the resource name of - the template has the following format: ``projects/{proje - ct_id}/regions/{region}/workflowTemplates/{template_id}`` - - For ``projects.locations.workflowTemplates``, the resource - name of the template has the following format: ``project - s/{project_id}/locations/{location}/workflowTemplates/{templat - e_id}`` - version: - Optional. Used to perform a consistent read-modify-write. - This field should be left blank for a - ``CreateWorkflowTemplate`` request. It is required for an - ``UpdateWorkflowTemplate`` request, and must match the current - server version. A typical update template flow would fetch the - current template with a ``GetWorkflowTemplate`` request, which - will return the current template with the ``version`` field - filled in with the current server version. The user updates - other fields in the template, then returns it as part of the - ``UpdateWorkflowTemplate`` request. - create_time: - Output only. The time template was created. - update_time: - Output only. The time template was last updated. - labels: - Optional. The labels to associate with this template. These - labels will be propagated to all jobs and clusters created by - the workflow instance. Label **keys** must contain 1 to 63 - characters, and must conform to `RFC 1035 - `__. Label **values** - may be empty, but, if present, must contain 1 to 63 - characters, and must conform to `RFC 1035 - `__. No more than 32 - labels can be associated with a template. - placement: - Required. WorkflowTemplate scheduling information. - jobs: - Required. The Directed Acyclic Graph of Jobs to submit. - parameters: - Optional. Template parameters whose values are substituted - into the template. Values for parameters must be provided when - the template is instantiated. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.WorkflowTemplate) - }, -) -_sym_db.RegisterMessage(WorkflowTemplate) -_sym_db.RegisterMessage(WorkflowTemplate.LabelsEntry) - -WorkflowTemplatePlacement = _reflection.GeneratedProtocolMessageType( - "WorkflowTemplatePlacement", - (_message.Message,), - { - "DESCRIPTOR": _WORKFLOWTEMPLATEPLACEMENT, - "__module__": "google.cloud.dataproc_v1beta2.proto.workflow_templates_pb2", - "__doc__": """Specifies workflow execution target. Either ``managed_cluster`` or - ``cluster_selector`` is required. - - Attributes: - placement: - Required. Specifies where workflow executes; either on a - managed cluster or an existing cluster chosen by labels. - managed_cluster: - Optional. A cluster that is managed by the workflow. - cluster_selector: - Optional. A selector that chooses target cluster for jobs - based on metadata. The selector is evaluated at the time each - job is submitted. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement) - }, -) -_sym_db.RegisterMessage(WorkflowTemplatePlacement) - -ManagedCluster = _reflection.GeneratedProtocolMessageType( - "ManagedCluster", - (_message.Message,), - { - "LabelsEntry": _reflection.GeneratedProtocolMessageType( - "LabelsEntry", - (_message.Message,), - { - "DESCRIPTOR": _MANAGEDCLUSTER_LABELSENTRY, - "__module__": "google.cloud.dataproc_v1beta2.proto.workflow_templates_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ManagedCluster.LabelsEntry) - }, - ), - "DESCRIPTOR": _MANAGEDCLUSTER, - "__module__": "google.cloud.dataproc_v1beta2.proto.workflow_templates_pb2", - "__doc__": """Cluster that is managed by the workflow. - - Attributes: - cluster_name: - Required. The cluster name prefix. A unique cluster name will - be formed by appending a random suffix. The name must contain - only lower-case letters (a-z), numbers (0-9), and hyphens (-). - Must begin with a letter. Cannot begin or end with hyphen. - Must consist of between 2 and 35 characters. - config: - Required. The cluster configuration. - labels: - Optional. The labels to associate with this cluster. Label - keys must be between 1 and 63 characters long. Label values must be between - 1 and 63 characters long. No more than 32 - labels can be associated with a given cluster. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ManagedCluster) - }, -) -_sym_db.RegisterMessage(ManagedCluster) -_sym_db.RegisterMessage(ManagedCluster.LabelsEntry) - -ClusterSelector = _reflection.GeneratedProtocolMessageType( - "ClusterSelector", - (_message.Message,), - { - "ClusterLabelsEntry": _reflection.GeneratedProtocolMessageType( - "ClusterLabelsEntry", - (_message.Message,), - { - "DESCRIPTOR": _CLUSTERSELECTOR_CLUSTERLABELSENTRY, - "__module__": "google.cloud.dataproc_v1beta2.proto.workflow_templates_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ClusterSelector.ClusterLabelsEntry) - }, - ), - "DESCRIPTOR": _CLUSTERSELECTOR, - "__module__": "google.cloud.dataproc_v1beta2.proto.workflow_templates_pb2", - "__doc__": """A selector that chooses target cluster for jobs based on metadata. - - Attributes: - zone: - Optional. The zone where workflow process executes. This - parameter does not affect the selection of the cluster. If - unspecified, the zone of the first cluster matching the - selector is used. - cluster_labels: - Required. The cluster labels. Cluster must have all labels to - match. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ClusterSelector) - }, -) -_sym_db.RegisterMessage(ClusterSelector) -_sym_db.RegisterMessage(ClusterSelector.ClusterLabelsEntry) - -OrderedJob = _reflection.GeneratedProtocolMessageType( - "OrderedJob", - (_message.Message,), - { - "LabelsEntry": _reflection.GeneratedProtocolMessageType( - "LabelsEntry", - (_message.Message,), - { - "DESCRIPTOR": _ORDEREDJOB_LABELSENTRY, - "__module__": "google.cloud.dataproc_v1beta2.proto.workflow_templates_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.OrderedJob.LabelsEntry) - }, - ), - "DESCRIPTOR": _ORDEREDJOB, - "__module__": "google.cloud.dataproc_v1beta2.proto.workflow_templates_pb2", - "__doc__": """A job executed by the workflow. - - Attributes: - step_id: - Required. The step id. The id must be unique among all jobs - within the template. 
The step id is used as prefix for job - id, as job ``goog-dataproc-workflow-step-id`` label, and in [p - rerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.p - rerequisite_step_ids] field from other steps. The id must - contain only letters (a-z, A-Z), numbers (0-9), underscores - (_), and hyphens (-). Cannot begin or end with underscore or - hyphen. Must consist of between 3 and 50 characters. - job_type: - Required. The job definition. - spark_r_job: - Spark R job - presto_job: - Presto job - labels: - Optional. The labels to associate with this job. Label keys - must be between 1 and 63 characters long. Label values must be between - 1 and 63 characters long. No more than 32 labels can be - associated with a given job. - scheduling: - Optional. Job scheduling configuration. - prerequisite_step_ids: - Optional. The optional list of prerequisite job step_ids. If - not specified, the job will start at the beginning of - workflow. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.OrderedJob) - }, -) -_sym_db.RegisterMessage(OrderedJob) -_sym_db.RegisterMessage(OrderedJob.LabelsEntry) - -TemplateParameter = _reflection.GeneratedProtocolMessageType( - "TemplateParameter", - (_message.Message,), - { - "DESCRIPTOR": _TEMPLATEPARAMETER, - "__module__": "google.cloud.dataproc_v1beta2.proto.workflow_templates_pb2", - "__doc__": """A configurable parameter that replaces one or more fields in the - template. Parameterizable fields: - Labels - File uris - Job - properties - Job arguments - Script variables - Main class (in - HadoopJob and SparkJob) - Zone (in ClusterSelector) - - Attributes: - name: - Required. Parameter name. The parameter name is used as the - key, and paired with the parameter value, which are passed to - the template when the template is instantiated. The name must - contain only capital letters (A-Z), numbers (0-9), and - underscores (_), and must not start with a number. The maximum - length is 40 characters. - fields: - Required. Paths to all fields that the parameter replaces. A - field is allowed to appear in at most one parameter’s list of - field paths. A field path is similar in syntax to a - [google.protobuf.FieldMask][google.protobuf.FieldMask]. For - example, a field path that references the zone field of a - workflow template’s cluster selector would be specified as - ``placement.clusterSelector.zone``. 
Also, field paths can - reference fields using the following syntax: - Values in - maps can be referenced by key: - labels[‘key’] - - placement.clusterSelector.clusterLabels[‘key’] - - placement.managedCluster.labels[‘key’] - - placement.clusterSelector.clusterLabels[‘key’] - - jobs[‘step-id’].labels[‘key’] - Jobs in the jobs list can be - referenced by step-id: - jobs[‘step- - id’].hadoopJob.mainJarFileUri - jobs[‘step- - id’].hiveJob.queryFileUri - jobs[‘step- - id’].pySparkJob.mainPythonFileUri - jobs[‘step- - id’].hadoopJob.jarFileUris[0] - jobs[‘step- - id’].hadoopJob.archiveUris[0] - jobs[‘step- - id’].hadoopJob.fileUris[0] - jobs[‘step- - id’].pySparkJob.pythonFileUris[0] - Items in repeated fields - can be referenced by a zero-based index: - jobs[‘step- - id’].sparkJob.args[0] - Other examples: - jobs[‘step- - id’].hadoopJob.properties[‘key’] - jobs[‘step- - id’].hadoopJob.args[0] - jobs[‘step- - id’].hiveJob.scriptVariables[‘key’] - jobs[‘step- - id’].hadoopJob.mainJarFileUri - - placement.clusterSelector.zone It may not be possible to - parameterize maps and repeated fields in their entirety since - only individual map values and individual items in repeated - fields can be referenced. For example, the following field - paths are invalid: - placement.clusterSelector.clusterLabels - - jobs[‘step-id’].sparkJob.args - description: - Optional. Brief description of the parameter. Must not exceed - 1024 characters. - validation: - Optional. Validation rules to be applied to this parameter’s - value. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.TemplateParameter) - }, -) -_sym_db.RegisterMessage(TemplateParameter) - -ParameterValidation = _reflection.GeneratedProtocolMessageType( - "ParameterValidation", - (_message.Message,), - { - "DESCRIPTOR": _PARAMETERVALIDATION, - "__module__": "google.cloud.dataproc_v1beta2.proto.workflow_templates_pb2", - "__doc__": """Configuration for parameter validation. - - Attributes: - validation_type: - Required. The type of validation to be performed. - regex: - Validation based on regular expressions. - values: - Validation based on a list of allowed values. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ParameterValidation) - }, -) -_sym_db.RegisterMessage(ParameterValidation) - -RegexValidation = _reflection.GeneratedProtocolMessageType( - "RegexValidation", - (_message.Message,), - { - "DESCRIPTOR": _REGEXVALIDATION, - "__module__": "google.cloud.dataproc_v1beta2.proto.workflow_templates_pb2", - "__doc__": """Validation based on regular expressions. - - Attributes: - regexes: - Required. RE2 regular expressions used to validate the - parameter’s value. The value must match the regex in its - entirety (substring matches are not sufficient). - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.RegexValidation) - }, -) -_sym_db.RegisterMessage(RegexValidation) - -ValueValidation = _reflection.GeneratedProtocolMessageType( - "ValueValidation", - (_message.Message,), - { - "DESCRIPTOR": _VALUEVALIDATION, - "__module__": "google.cloud.dataproc_v1beta2.proto.workflow_templates_pb2", - "__doc__": """Validation based on a list of allowed values. - - Attributes: - values: - Required. List of allowed values for the parameter. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ValueValidation) - }, -) -_sym_db.RegisterMessage(ValueValidation) - -WorkflowMetadata = _reflection.GeneratedProtocolMessageType( - "WorkflowMetadata", - (_message.Message,), - { - "ParametersEntry": _reflection.GeneratedProtocolMessageType( - "ParametersEntry", - (_message.Message,), - { - "DESCRIPTOR": _WORKFLOWMETADATA_PARAMETERSENTRY, - "__module__": "google.cloud.dataproc_v1beta2.proto.workflow_templates_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.WorkflowMetadata.ParametersEntry) - }, - ), - "DESCRIPTOR": _WORKFLOWMETADATA, - "__module__": "google.cloud.dataproc_v1beta2.proto.workflow_templates_pb2", - "__doc__": """A Dataproc workflow template resource. - - Attributes: - template: - Output only. The resource name of the workflow template as - described in - https://cloud.google.com/apis/design/resource_names. - For - ``projects.regions.workflowTemplates``, the resource name of - the template has the following format: ``projects/{proje - ct_id}/regions/{region}/workflowTemplates/{template_id}`` - - For ``projects.locations.workflowTemplates``, the resource - name of the template has the following format: ``project - s/{project_id}/locations/{location}/workflowTemplates/{templat - e_id}`` - version: - Output only. The version of template at the time of workflow - instantiation. - create_cluster: - Output only. The create cluster operation metadata. - graph: - Output only. The workflow graph. - delete_cluster: - Output only. The delete cluster operation metadata. - state: - Output only. The workflow state. - cluster_name: - Output only. The name of the target cluster. - parameters: - Map from parameter names to values that were used for those - parameters. - start_time: - Output only. Workflow start time. - end_time: - Output only. Workflow end time. - cluster_uuid: - Output only. The UUID of target cluster. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.WorkflowMetadata) - }, -) -_sym_db.RegisterMessage(WorkflowMetadata) -_sym_db.RegisterMessage(WorkflowMetadata.ParametersEntry) - -ClusterOperation = _reflection.GeneratedProtocolMessageType( - "ClusterOperation", - (_message.Message,), - { - "DESCRIPTOR": _CLUSTEROPERATION, - "__module__": "google.cloud.dataproc_v1beta2.proto.workflow_templates_pb2", - "__doc__": """The cluster operation triggered by a workflow. - - Attributes: - operation_id: - Output only. The id of the cluster operation. - error: - Output only. Error, if operation failed. - done: - Output only. Indicates the operation is done. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ClusterOperation) - }, -) -_sym_db.RegisterMessage(ClusterOperation) - -WorkflowGraph = _reflection.GeneratedProtocolMessageType( - "WorkflowGraph", - (_message.Message,), - { - "DESCRIPTOR": _WORKFLOWGRAPH, - "__module__": "google.cloud.dataproc_v1beta2.proto.workflow_templates_pb2", - "__doc__": """The workflow graph. - - Attributes: - nodes: - Output only. The workflow nodes. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.WorkflowGraph) - }, -) -_sym_db.RegisterMessage(WorkflowGraph) - -WorkflowNode = _reflection.GeneratedProtocolMessageType( - "WorkflowNode", - (_message.Message,), - { - "DESCRIPTOR": _WORKFLOWNODE, - "__module__": "google.cloud.dataproc_v1beta2.proto.workflow_templates_pb2", - "__doc__": """The workflow node. - - Attributes: - step_id: - Output only. The name of the node. 
- prerequisite_step_ids: - Output only. Node’s prerequisite nodes. - job_id: - Output only. The job id; populated after the node enters - RUNNING state. - state: - Output only. The node state. - error: - Output only. The error detail. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.WorkflowNode) - }, -) -_sym_db.RegisterMessage(WorkflowNode) - -CreateWorkflowTemplateRequest = _reflection.GeneratedProtocolMessageType( - "CreateWorkflowTemplateRequest", - (_message.Message,), - { - "DESCRIPTOR": _CREATEWORKFLOWTEMPLATEREQUEST, - "__module__": "google.cloud.dataproc_v1beta2.proto.workflow_templates_pb2", - "__doc__": """A request to create a workflow template. - - Attributes: - parent: - Required. The resource name of the region or location, as - described in - https://cloud.google.com/apis/design/resource_names. - For - ``projects.regions.workflowTemplates,create``, the resource - name of the region has the following format: - ``projects/{project_id}/regions/{region}`` - For - ``projects.locations.workflowTemplates.create``, the resource - name of the location has the following format: - ``projects/{project_id}/locations/{location}`` - template: - Required. The Dataproc workflow template to create. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest) - }, -) -_sym_db.RegisterMessage(CreateWorkflowTemplateRequest) - -GetWorkflowTemplateRequest = _reflection.GeneratedProtocolMessageType( - "GetWorkflowTemplateRequest", - (_message.Message,), - { - "DESCRIPTOR": _GETWORKFLOWTEMPLATEREQUEST, - "__module__": "google.cloud.dataproc_v1beta2.proto.workflow_templates_pb2", - "__doc__": """A request to fetch a workflow template. - - Attributes: - name: - Required. The resource name of the workflow template, as - described in - https://cloud.google.com/apis/design/resource_names. - For - ``projects.regions.workflowTemplates.get``, the resource name - of the template has the following format: ``projects/{pr - oject_id}/regions/{region}/workflowTemplates/{template_id}`` - - For ``projects.locations.workflowTemplates.get``, the - resource name of the template has the following format: - ``projects/{project_id}/locations/{location}/workflowTemplates - /{template_id}`` - version: - Optional. The version of workflow template to retrieve. Only - previously instantiated versions can be retrieved. If - unspecified, retrieves the current version. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest) - }, -) -_sym_db.RegisterMessage(GetWorkflowTemplateRequest) - -InstantiateWorkflowTemplateRequest = _reflection.GeneratedProtocolMessageType( - "InstantiateWorkflowTemplateRequest", - (_message.Message,), - { - "ParametersEntry": _reflection.GeneratedProtocolMessageType( - "ParametersEntry", - (_message.Message,), - { - "DESCRIPTOR": _INSTANTIATEWORKFLOWTEMPLATEREQUEST_PARAMETERSENTRY, - "__module__": "google.cloud.dataproc_v1beta2.proto.workflow_templates_pb2" - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest.ParametersEntry) - }, - ), - "DESCRIPTOR": _INSTANTIATEWORKFLOWTEMPLATEREQUEST, - "__module__": "google.cloud.dataproc_v1beta2.proto.workflow_templates_pb2", - "__doc__": """A request to instantiate a workflow template. - - Attributes: - name: - Required. The resource name of the workflow template, as - described in - https://cloud.google.com/apis/design/resource_names. 
- For - ``projects.regions.workflowTemplates.instantiate``, the - resource name of the template has the following format: - ``projects/{project_id}/regions/{region}/workflowTemplates/{te - mplate_id}`` - For - ``projects.locations.workflowTemplates.instantiate``, the - resource name of the template has the following format: ``p - rojects/{project_id}/locations/{location}/workflowTemplates/{t - emplate_id}`` - version: - Optional. The version of workflow template to instantiate. If - specified, the workflow will be instantiated only if the - current version of the workflow template has the supplied - version. This option cannot be used to instantiate a previous - version of workflow template. - instance_id: - Deprecated. Please use ``request_id`` field instead. - request_id: - Optional. A tag that prevents multiple concurrent workflow - instances with the same tag from running. This mitigates risk - of concurrent instances started due to retries. It is - recommended to always set this value to a `UUID `__. The tag - must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). The maximum length is 40 - characters. - parameters: - Optional. Map from parameter names to values that should be - used for those parameters. Values may not exceed 100 - characters. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest) - }, -) -_sym_db.RegisterMessage(InstantiateWorkflowTemplateRequest) -_sym_db.RegisterMessage(InstantiateWorkflowTemplateRequest.ParametersEntry) - -InstantiateInlineWorkflowTemplateRequest = _reflection.GeneratedProtocolMessageType( - "InstantiateInlineWorkflowTemplateRequest", - (_message.Message,), - { - "DESCRIPTOR": _INSTANTIATEINLINEWORKFLOWTEMPLATEREQUEST, - "__module__": "google.cloud.dataproc_v1beta2.proto.workflow_templates_pb2", - "__doc__": """A request to instantiate an inline workflow template. - - Attributes: - parent: - Required. The resource name of the region or location, as - described in - https://cloud.google.com/apis/design/resource_names. - For - ``projects.regions.workflowTemplates,instantiateinline``, the - resource name of the region has the following format: - ``projects/{project_id}/regions/{region}`` - For - ``projects.locations.workflowTemplates.instantiateinline``, - the resource name of the location has the following format: - ``projects/{project_id}/locations/{location}`` - template: - Required. The workflow template to instantiate. - instance_id: - Deprecated. Please use ``request_id`` field instead. - request_id: - Optional. A tag that prevents multiple concurrent workflow - instances with the same tag from running. This mitigates risk - of concurrent instances started due to retries. It is - recommended to always set this value to a `UUID `__. The tag - must contain only letters (a-z, A-Z), numbers (0-9), - underscores (_), and hyphens (-). The maximum length is 40 - characters. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest) - }, -) -_sym_db.RegisterMessage(InstantiateInlineWorkflowTemplateRequest) - -UpdateWorkflowTemplateRequest = _reflection.GeneratedProtocolMessageType( - "UpdateWorkflowTemplateRequest", - (_message.Message,), - { - "DESCRIPTOR": _UPDATEWORKFLOWTEMPLATEREQUEST, - "__module__": "google.cloud.dataproc_v1beta2.proto.workflow_templates_pb2", - "__doc__": """A request to update a workflow template. - - Attributes: - template: - Required. The updated workflow template. 
The - ``template.version`` field must match the current version. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest) - }, -) -_sym_db.RegisterMessage(UpdateWorkflowTemplateRequest) - -ListWorkflowTemplatesRequest = _reflection.GeneratedProtocolMessageType( - "ListWorkflowTemplatesRequest", - (_message.Message,), - { - "DESCRIPTOR": _LISTWORKFLOWTEMPLATESREQUEST, - "__module__": "google.cloud.dataproc_v1beta2.proto.workflow_templates_pb2", - "__doc__": """A request to list workflow templates in a project. - - Attributes: - parent: - Required. The resource name of the region or location, as - described in - https://cloud.google.com/apis/design/resource_names. - For - ``projects.regions.workflowTemplates,list``, the resource name - of the region has the following format: - ``projects/{project_id}/regions/{region}`` - For - ``projects.locations.workflowTemplates.list``, the resource - name of the location has the following format: - ``projects/{project_id}/locations/{location}`` - page_size: - Optional. The maximum number of results to return in each - response. - page_token: - Optional. The page token, returned by a previous call, to - request the next page of results. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest) - }, -) -_sym_db.RegisterMessage(ListWorkflowTemplatesRequest) - -ListWorkflowTemplatesResponse = _reflection.GeneratedProtocolMessageType( - "ListWorkflowTemplatesResponse", - (_message.Message,), - { - "DESCRIPTOR": _LISTWORKFLOWTEMPLATESRESPONSE, - "__module__": "google.cloud.dataproc_v1beta2.proto.workflow_templates_pb2", - "__doc__": """A response to a request to list workflow templates in a project. - - Attributes: - templates: - Output only. WorkflowTemplates list. - next_page_token: - Output only. This token is included in the response if there - are more results to fetch. To fetch additional results, - provide this value as the page_token in a subsequent - ListWorkflowTemplatesRequest. - """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse) - }, -) -_sym_db.RegisterMessage(ListWorkflowTemplatesResponse) - -DeleteWorkflowTemplateRequest = _reflection.GeneratedProtocolMessageType( - "DeleteWorkflowTemplateRequest", - (_message.Message,), - { - "DESCRIPTOR": _DELETEWORKFLOWTEMPLATEREQUEST, - "__module__": "google.cloud.dataproc_v1beta2.proto.workflow_templates_pb2", - "__doc__": """A request to delete a workflow template. Currently started workflows - will remain running. - - Attributes: - name: - Required. The resource name of the workflow template, as - described in - https://cloud.google.com/apis/design/resource_names. - For - ``projects.regions.workflowTemplates.delete``, the resource - name of the template has the following format: ``project - s/{project_id}/regions/{region}/workflowTemplates/{template_id - }`` - For - ``projects.locations.workflowTemplates.instantiate``, the - resource name of the template has the following format: ``p - rojects/{project_id}/locations/{location}/workflowTemplates/{t - emplate_id}`` - version: - Optional. The version of workflow template to delete. If - specified, will only delete the template if the current server - version matches specified version. 
- """, - # @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest) - }, -) -_sym_db.RegisterMessage(DeleteWorkflowTemplateRequest) - - -DESCRIPTOR._options = None -_WORKFLOWTEMPLATE_LABELSENTRY._options = None -_WORKFLOWTEMPLATE.fields_by_name["id"]._options = None -_WORKFLOWTEMPLATE.fields_by_name["name"]._options = None -_WORKFLOWTEMPLATE.fields_by_name["version"]._options = None -_WORKFLOWTEMPLATE.fields_by_name["create_time"]._options = None -_WORKFLOWTEMPLATE.fields_by_name["update_time"]._options = None -_WORKFLOWTEMPLATE.fields_by_name["labels"]._options = None -_WORKFLOWTEMPLATE.fields_by_name["parameters"]._options = None -_WORKFLOWTEMPLATE._options = None -_MANAGEDCLUSTER_LABELSENTRY._options = None -_CLUSTERSELECTOR_CLUSTERLABELSENTRY._options = None -_ORDEREDJOB_LABELSENTRY._options = None -_ORDEREDJOB.fields_by_name["step_id"]._options = None -_ORDEREDJOB.fields_by_name["labels"]._options = None -_ORDEREDJOB.fields_by_name["scheduling"]._options = None -_ORDEREDJOB.fields_by_name["prerequisite_step_ids"]._options = None -_WORKFLOWMETADATA_PARAMETERSENTRY._options = None -_WORKFLOWMETADATA.fields_by_name["template"]._options = None -_WORKFLOWMETADATA.fields_by_name["version"]._options = None -_WORKFLOWMETADATA.fields_by_name["create_cluster"]._options = None -_WORKFLOWMETADATA.fields_by_name["graph"]._options = None -_WORKFLOWMETADATA.fields_by_name["delete_cluster"]._options = None -_WORKFLOWMETADATA.fields_by_name["state"]._options = None -_WORKFLOWMETADATA.fields_by_name["cluster_name"]._options = None -_WORKFLOWMETADATA.fields_by_name["start_time"]._options = None -_WORKFLOWMETADATA.fields_by_name["end_time"]._options = None -_WORKFLOWMETADATA.fields_by_name["cluster_uuid"]._options = None -_CLUSTEROPERATION.fields_by_name["operation_id"]._options = None -_CLUSTEROPERATION.fields_by_name["error"]._options = None -_CLUSTEROPERATION.fields_by_name["done"]._options = None -_WORKFLOWGRAPH.fields_by_name["nodes"]._options = None -_WORKFLOWNODE.fields_by_name["step_id"]._options = None -_WORKFLOWNODE.fields_by_name["prerequisite_step_ids"]._options = None -_WORKFLOWNODE.fields_by_name["job_id"]._options = None -_WORKFLOWNODE.fields_by_name["state"]._options = None -_WORKFLOWNODE.fields_by_name["error"]._options = None -_CREATEWORKFLOWTEMPLATEREQUEST.fields_by_name["parent"]._options = None -_CREATEWORKFLOWTEMPLATEREQUEST.fields_by_name["template"]._options = None -_GETWORKFLOWTEMPLATEREQUEST.fields_by_name["name"]._options = None -_INSTANTIATEWORKFLOWTEMPLATEREQUEST_PARAMETERSENTRY._options = None -_INSTANTIATEWORKFLOWTEMPLATEREQUEST.fields_by_name["name"]._options = None -_INSTANTIATEWORKFLOWTEMPLATEREQUEST.fields_by_name["instance_id"]._options = None -_INSTANTIATEINLINEWORKFLOWTEMPLATEREQUEST.fields_by_name["parent"]._options = None -_INSTANTIATEINLINEWORKFLOWTEMPLATEREQUEST.fields_by_name["template"]._options = None -_UPDATEWORKFLOWTEMPLATEREQUEST.fields_by_name["template"]._options = None -_LISTWORKFLOWTEMPLATESREQUEST.fields_by_name["parent"]._options = None -_LISTWORKFLOWTEMPLATESRESPONSE.fields_by_name["templates"]._options = None -_LISTWORKFLOWTEMPLATESRESPONSE.fields_by_name["next_page_token"]._options = None -_DELETEWORKFLOWTEMPLATEREQUEST.fields_by_name["name"]._options = None - -_WORKFLOWTEMPLATESERVICE = _descriptor.ServiceDescriptor( - name="WorkflowTemplateService", - full_name="google.cloud.dataproc.v1beta2.WorkflowTemplateService", - file=DESCRIPTOR, - index=0, - 
serialized_options=b"\312A\027dataproc.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", - create_key=_descriptor._internal_create_key, - serialized_start=5417, - serialized_end=7698, - methods=[ - _descriptor.MethodDescriptor( - name="CreateWorkflowTemplate", - full_name="google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate", - index=0, - containing_service=None, - input_type=_CREATEWORKFLOWTEMPLATEREQUEST, - output_type=_WORKFLOWTEMPLATE, - serialized_options=b'\202\323\344\223\002\214\001"8/v1beta2/{parent=projects/*/regions/*}/workflowTemplates:\010templateZF":/v1beta2/{parent=projects/*/locations/*}/workflowTemplates:\010template\332A\020parent, template', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="GetWorkflowTemplate", - full_name="google.cloud.dataproc.v1beta2.WorkflowTemplateService.GetWorkflowTemplate", - index=1, - containing_service=None, - input_type=_GETWORKFLOWTEMPLATEREQUEST, - output_type=_WORKFLOWTEMPLATE, - serialized_options=b"\202\323\344\223\002x\0228/v1beta2/{name=projects/*/regions/*/workflowTemplates/*}Z<\022:/v1beta2/{name=projects/*/locations/*/workflowTemplates/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="InstantiateWorkflowTemplate", - full_name="google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate", - index=2, - containing_service=None, - input_type=_INSTANTIATEWORKFLOWTEMPLATEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\002\226\001"D/v1beta2/{name=projects/*/regions/*/workflowTemplates/*}:instantiate:\001*ZK"F/v1beta2/{name=projects/*/locations/*/workflowTemplates/*}:instantiate:\001*\332A\004name\332A\020name, parameters\312A)\n\025google.protobuf.Empty\022\020WorkflowMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="InstantiateInlineWorkflowTemplate", - full_name="google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateInlineWorkflowTemplate", - index=3, - containing_service=None, - input_type=_INSTANTIATEINLINEWORKFLOWTEMPLATEREQUEST, - output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=b'\202\323\344\223\002\260\001"L/v1beta2/{parent=projects/*/locations/*}/workflowTemplates:instantiateInline:\010templateZV"J/v1beta2/{parent=projects/*/regions/*}/workflowTemplates:instantiateInline:\010template\332A\020parent, template\312A)\n\025google.protobuf.Empty\022\020WorkflowMetadata', - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="UpdateWorkflowTemplate", - full_name="google.cloud.dataproc.v1beta2.WorkflowTemplateService.UpdateWorkflowTemplate", - index=4, - containing_service=None, - input_type=_UPDATEWORKFLOWTEMPLATEREQUEST, - output_type=_WORKFLOWTEMPLATE, - serialized_options=b"\202\323\344\223\002\236\001\032A/v1beta2/{template.name=projects/*/regions/*/workflowTemplates/*}:\010templateZO\032C/v1beta2/{template.name=projects/*/locations/*/workflowTemplates/*}:\010template\332A\010template", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="ListWorkflowTemplates", - full_name="google.cloud.dataproc.v1beta2.WorkflowTemplateService.ListWorkflowTemplates", - index=5, - containing_service=None, - input_type=_LISTWORKFLOWTEMPLATESREQUEST, - output_type=_LISTWORKFLOWTEMPLATESRESPONSE, - 
serialized_options=b"\202\323\344\223\002x\0228/v1beta2/{parent=projects/*/regions/*}/workflowTemplatesZ<\022:/v1beta2/{parent=projects/*/locations/*}/workflowTemplates\332A\006parent", - create_key=_descriptor._internal_create_key, - ), - _descriptor.MethodDescriptor( - name="DeleteWorkflowTemplate", - full_name="google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate", - index=6, - containing_service=None, - input_type=_DELETEWORKFLOWTEMPLATEREQUEST, - output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, - serialized_options=b"\202\323\344\223\002x*8/v1beta2/{name=projects/*/regions/*/workflowTemplates/*}Z<*:/v1beta2/{name=projects/*/locations/*/workflowTemplates/*}\332A\004name", - create_key=_descriptor._internal_create_key, - ), - ], -) -_sym_db.RegisterServiceDescriptor(_WORKFLOWTEMPLATESERVICE) - -DESCRIPTOR.services_by_name["WorkflowTemplateService"] = _WORKFLOWTEMPLATESERVICE - -# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/dataproc_v1beta2/proto/workflow_templates_pb2_grpc.py b/google/cloud/dataproc_v1beta2/proto/workflow_templates_pb2_grpc.py deleted file mode 100644 index 72d48e23..00000000 --- a/google/cloud/dataproc_v1beta2/proto/workflow_templates_pb2_grpc.py +++ /dev/null @@ -1,400 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -"""Client and server classes corresponding to protobuf-defined services.""" -import grpc - -from google.cloud.dataproc_v1beta2.proto import ( - workflow_templates_pb2 as google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2, -) -from google.longrunning import ( - operations_pb2 as google_dot_longrunning_dot_operations__pb2, -) -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -class WorkflowTemplateServiceStub(object): - """The API interface for managing Workflow Templates in the - Dataproc API. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.CreateWorkflowTemplate = channel.unary_unary( - "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/CreateWorkflowTemplate", - request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.CreateWorkflowTemplateRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.WorkflowTemplate.FromString, - ) - self.GetWorkflowTemplate = channel.unary_unary( - "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/GetWorkflowTemplate", - request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.GetWorkflowTemplateRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.WorkflowTemplate.FromString, - ) - self.InstantiateWorkflowTemplate = channel.unary_unary( - "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/InstantiateWorkflowTemplate", - request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.InstantiateWorkflowTemplateRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.InstantiateInlineWorkflowTemplate = channel.unary_unary( - "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/InstantiateInlineWorkflowTemplate", - request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.InstantiateInlineWorkflowTemplateRequest.SerializeToString, - response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, - ) - self.UpdateWorkflowTemplate = channel.unary_unary( - "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/UpdateWorkflowTemplate", - request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.UpdateWorkflowTemplateRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.WorkflowTemplate.FromString, - ) - self.ListWorkflowTemplates = channel.unary_unary( - "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/ListWorkflowTemplates", - request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.ListWorkflowTemplatesRequest.SerializeToString, - response_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.ListWorkflowTemplatesResponse.FromString, - ) - self.DeleteWorkflowTemplate = channel.unary_unary( - "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/DeleteWorkflowTemplate", - request_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.DeleteWorkflowTemplateRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - - -class WorkflowTemplateServiceServicer(object): - """The API interface for managing Workflow Templates in the - Dataproc API. - """ - - def CreateWorkflowTemplate(self, request, context): - """Creates new workflow template. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def GetWorkflowTemplate(self, request, context): - """Retrieves the latest workflow template. - - Can retrieve previously instantiated template by specifying optional - version parameter. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def InstantiateWorkflowTemplate(self, request, context): - """Instantiates a template and begins execution. - - The returned Operation can be used to track execution of - workflow by polling - [operations.get][google.longrunning.Operations.GetOperation]. - The Operation will complete when entire workflow is finished. - - The running workflow can be aborted via - [operations.cancel][google.longrunning.Operations.CancelOperation]. - This will cause any inflight jobs to be cancelled and workflow-owned - clusters to be deleted. - - The [Operation.metadata][google.longrunning.Operation.metadata] will be - [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#workflowmetadata). - Also see [Using - WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). - - On successful completion, - [Operation.response][google.longrunning.Operation.response] will be - [Empty][google.protobuf.Empty]. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def InstantiateInlineWorkflowTemplate(self, request, context): - """Instantiates a template and begins execution. - - This method is equivalent to executing the sequence - [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate], - [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate]. - - The returned Operation can be used to track execution of - workflow by polling - [operations.get][google.longrunning.Operations.GetOperation]. - The Operation will complete when entire workflow is finished. - - The running workflow can be aborted via - [operations.cancel][google.longrunning.Operations.CancelOperation]. - This will cause any inflight jobs to be cancelled and workflow-owned - clusters to be deleted. - - The [Operation.metadata][google.longrunning.Operation.metadata] will be - [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata). - Also see [Using - WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata). - - On successful completion, - [Operation.response][google.longrunning.Operation.response] will be - [Empty][google.protobuf.Empty]. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def UpdateWorkflowTemplate(self, request, context): - """Updates (replaces) workflow template. The updated template - must contain version that matches the current server version. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def ListWorkflowTemplates(self, request, context): - """Lists workflows that match the specified filter in the request. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - def DeleteWorkflowTemplate(self, request, context): - """Deletes a workflow template. It does not cancel in-progress workflows. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details("Method not implemented!") - raise NotImplementedError("Method not implemented!") - - -def add_WorkflowTemplateServiceServicer_to_server(servicer, server): - rpc_method_handlers = { - "CreateWorkflowTemplate": grpc.unary_unary_rpc_method_handler( - servicer.CreateWorkflowTemplate, - request_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.CreateWorkflowTemplateRequest.FromString, - response_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.WorkflowTemplate.SerializeToString, - ), - "GetWorkflowTemplate": grpc.unary_unary_rpc_method_handler( - servicer.GetWorkflowTemplate, - request_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.GetWorkflowTemplateRequest.FromString, - response_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.WorkflowTemplate.SerializeToString, - ), - "InstantiateWorkflowTemplate": grpc.unary_unary_rpc_method_handler( - servicer.InstantiateWorkflowTemplate, - request_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.InstantiateWorkflowTemplateRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "InstantiateInlineWorkflowTemplate": grpc.unary_unary_rpc_method_handler( - servicer.InstantiateInlineWorkflowTemplate, - request_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.InstantiateInlineWorkflowTemplateRequest.FromString, - response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, - ), - "UpdateWorkflowTemplate": grpc.unary_unary_rpc_method_handler( - servicer.UpdateWorkflowTemplate, - request_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.UpdateWorkflowTemplateRequest.FromString, - response_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.WorkflowTemplate.SerializeToString, - ), - "ListWorkflowTemplates": grpc.unary_unary_rpc_method_handler( - servicer.ListWorkflowTemplates, - request_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.ListWorkflowTemplatesRequest.FromString, - response_serializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.ListWorkflowTemplatesResponse.SerializeToString, - ), - "DeleteWorkflowTemplate": grpc.unary_unary_rpc_method_handler( - servicer.DeleteWorkflowTemplate, - request_deserializer=google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.DeleteWorkflowTemplateRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - "google.cloud.dataproc.v1beta2.WorkflowTemplateService", rpc_method_handlers - ) - server.add_generic_rpc_handlers((generic_handler,)) - - -# This class is part of an EXPERIMENTAL API. -class WorkflowTemplateService(object): - """The API interface for managing Workflow Templates in the - Dataproc API. 
- """ - - @staticmethod - def CreateWorkflowTemplate( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/CreateWorkflowTemplate", - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.CreateWorkflowTemplateRequest.SerializeToString, - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.WorkflowTemplate.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def GetWorkflowTemplate( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/GetWorkflowTemplate", - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.GetWorkflowTemplateRequest.SerializeToString, - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.WorkflowTemplate.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def InstantiateWorkflowTemplate( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/InstantiateWorkflowTemplate", - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.InstantiateWorkflowTemplateRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def InstantiateInlineWorkflowTemplate( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/InstantiateInlineWorkflowTemplate", - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.InstantiateInlineWorkflowTemplateRequest.SerializeToString, - google_dot_longrunning_dot_operations__pb2.Operation.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def UpdateWorkflowTemplate( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/UpdateWorkflowTemplate", - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.UpdateWorkflowTemplateRequest.SerializeToString, - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.WorkflowTemplate.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def 
ListWorkflowTemplates( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/ListWorkflowTemplates", - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.ListWorkflowTemplatesRequest.SerializeToString, - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.ListWorkflowTemplatesResponse.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) - - @staticmethod - def DeleteWorkflowTemplate( - request, - target, - options=(), - channel_credentials=None, - call_credentials=None, - compression=None, - wait_for_ready=None, - timeout=None, - metadata=None, - ): - return grpc.experimental.unary_unary( - request, - target, - "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/DeleteWorkflowTemplate", - google_dot_cloud_dot_dataproc__v1beta2_dot_proto_dot_workflow__templates__pb2.DeleteWorkflowTemplateRequest.SerializeToString, - google_dot_protobuf_dot_empty__pb2.Empty.FromString, - options, - channel_credentials, - call_credentials, - compression, - wait_for_ready, - timeout, - metadata, - ) diff --git a/google/cloud/dataproc_v1beta2/py.typed b/google/cloud/dataproc_v1beta2/py.typed new file mode 100644 index 00000000..aac99cba --- /dev/null +++ b/google/cloud/dataproc_v1beta2/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-dataproc package uses inline types. diff --git a/google/cloud/dataproc_v1beta2/services/__init__.py b/google/cloud/dataproc_v1beta2/services/__init__.py new file mode 100644 index 00000000..42ffdf2b --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/__init__.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/__init__.py b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/__init__.py new file mode 100644 index 00000000..e33cbc43 --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from .client import AutoscalingPolicyServiceClient +from .async_client import AutoscalingPolicyServiceAsyncClient + +__all__ = ( + "AutoscalingPolicyServiceClient", + "AutoscalingPolicyServiceAsyncClient", +) diff --git a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/async_client.py b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/async_client.py new file mode 100644 index 00000000..36274045 --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/async_client.py @@ -0,0 +1,552 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service import pagers +from google.cloud.dataproc_v1beta2.types import autoscaling_policies + +from .transports.base import AutoscalingPolicyServiceTransport +from .transports.grpc_asyncio import AutoscalingPolicyServiceGrpcAsyncIOTransport +from .client import AutoscalingPolicyServiceClient + + +class AutoscalingPolicyServiceAsyncClient: + """The API interface for managing autoscaling policies in the + Cloud Dataproc API. + """ + + _client: AutoscalingPolicyServiceClient + + DEFAULT_ENDPOINT = AutoscalingPolicyServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = AutoscalingPolicyServiceClient.DEFAULT_MTLS_ENDPOINT + + autoscaling_policy_path = staticmethod( + AutoscalingPolicyServiceClient.autoscaling_policy_path + ) + + from_service_account_file = AutoscalingPolicyServiceClient.from_service_account_file + from_service_account_json = from_service_account_file + + get_transport_class = functools.partial( + type(AutoscalingPolicyServiceClient).get_transport_class, + type(AutoscalingPolicyServiceClient), + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, AutoscalingPolicyServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + ) -> None: + """Instantiate the autoscaling policy service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.AutoscalingPolicyServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. 
It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint, this is the default value for + the environment variable) and "auto" (auto switch to the default + mTLS endpoint if client SSL credentials is present). However, + the ``api_endpoint`` property takes precedence if provided. + (2) The ``client_cert_source`` property is used to provide client + SSL credentials for mutual TLS transport. If not provided, the + default SSL credentials will be used if present. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + + self._client = AutoscalingPolicyServiceClient( + credentials=credentials, transport=transport, client_options=client_options, + ) + + async def create_autoscaling_policy( + self, + request: autoscaling_policies.CreateAutoscalingPolicyRequest = None, + *, + parent: str = None, + policy: autoscaling_policies.AutoscalingPolicy = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> autoscaling_policies.AutoscalingPolicy: + r"""Creates new autoscaling policy. + + Args: + request (:class:`~.autoscaling_policies.CreateAutoscalingPolicyRequest`): + The request object. A request to create an autoscaling + policy. + parent (:class:`str`): + Required. The "resource name" of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.create``, + the resource name has the following format: + ``projects/{project_id}/regions/{region}`` + + - For + ``projects.locations.autoscalingPolicies.create``, + the resource name has the following format: + ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + policy (:class:`~.autoscaling_policies.AutoscalingPolicy`): + Required. The autoscaling policy to + create. + This corresponds to the ``policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.autoscaling_policies.AutoscalingPolicy: + Describes an autoscaling policy for + Dataproc cluster autoscaler. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent, policy]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = autoscaling_policies.CreateAutoscalingPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if policy is not None: + request.policy = policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
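+        # Note: no default retry policy is attached to this RPC (unlike the
+        # update/get/list methods below), presumably because a create is not
+        # assumed to be safely retryable; only the 600-second default timeout
+        # applies. Callers may still pass an explicit ``retry`` argument.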
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_autoscaling_policy, + default_timeout=600.0, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def update_autoscaling_policy( + self, + request: autoscaling_policies.UpdateAutoscalingPolicyRequest = None, + *, + policy: autoscaling_policies.AutoscalingPolicy = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> autoscaling_policies.AutoscalingPolicy: + r"""Updates (replaces) autoscaling policy. + + Disabled check for update_mask, because all updates will be full + replacements. + + Args: + request (:class:`~.autoscaling_policies.UpdateAutoscalingPolicyRequest`): + The request object. A request to update an autoscaling + policy. + policy (:class:`~.autoscaling_policies.AutoscalingPolicy`): + Required. The updated autoscaling + policy. + This corresponds to the ``policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.autoscaling_policies.AutoscalingPolicy: + Describes an autoscaling policy for + Dataproc cluster autoscaler. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([policy]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = autoscaling_policies.UpdateAutoscalingPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if policy is not None: + request.policy = policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_autoscaling_policy, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("policy.name", request.policy.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_autoscaling_policy( + self, + request: autoscaling_policies.GetAutoscalingPolicyRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> autoscaling_policies.AutoscalingPolicy: + r"""Retrieves autoscaling policy. 
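+
+        A minimal usage sketch (illustrative only; assumes an async context,
+        default credentials, and a placeholder resource name)::
+
+            client = AutoscalingPolicyServiceAsyncClient()
+            policy = await client.get_autoscaling_policy(
+                name="projects/my-project/regions/us-central1/autoscalingPolicies/my-policy",
+            )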
+ + Args: + request (:class:`~.autoscaling_policies.GetAutoscalingPolicyRequest`): + The request object. A request to fetch an autoscaling + policy. + name (:class:`str`): + Required. The "resource name" of the autoscaling policy, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.get``, the + resource name of the policy has the following format: + ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` + + - For ``projects.locations.autoscalingPolicies.get``, + the resource name of the policy has the following + format: + ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.autoscaling_policies.AutoscalingPolicy: + Describes an autoscaling policy for + Dataproc cluster autoscaler. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = autoscaling_policies.GetAutoscalingPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_autoscaling_policy, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_autoscaling_policies( + self, + request: autoscaling_policies.ListAutoscalingPoliciesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListAutoscalingPoliciesAsyncPager: + r"""Lists autoscaling policies in the project. + + Args: + request (:class:`~.autoscaling_policies.ListAutoscalingPoliciesRequest`): + The request object. A request to list autoscaling + policies in a project. + parent (:class:`str`): + Required. The "resource name" of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. 
+ + - For ``projects.regions.autoscalingPolicies.list``, + the resource name of the region has the following + format: ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.autoscalingPolicies.list``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListAutoscalingPoliciesAsyncPager: + A response to a request to list + autoscaling policies in a project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = autoscaling_policies.ListAutoscalingPoliciesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_autoscaling_policies, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListAutoscalingPoliciesAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_autoscaling_policy( + self, + request: autoscaling_policies.DeleteAutoscalingPolicyRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes an autoscaling policy. It is an error to + delete an autoscaling policy that is in use by one or + more clusters. + + Args: + request (:class:`~.autoscaling_policies.DeleteAutoscalingPolicyRequest`): + The request object. A request to delete an autoscaling + policy. + Autoscaling policies in use by one or more clusters will + not be deleted. + name (:class:`str`): + Required. The "resource name" of the autoscaling policy, + as described in + https://cloud.google.com/apis/design/resource_names. 
+ + - For ``projects.regions.autoscalingPolicies.delete``, + the resource name of the policy has the following + format: + ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` + + - For + ``projects.locations.autoscalingPolicies.delete``, + the resource name of the policy has the following + format: + ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = autoscaling_policies.DeleteAutoscalingPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_autoscaling_policy, + default_timeout=600.0, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + +try: + _client_info = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-dataproc",).version, + ) +except pkg_resources.DistributionNotFound: + _client_info = gapic_v1.client_info.ClientInfo() + + +__all__ = ("AutoscalingPolicyServiceAsyncClient",) diff --git a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/client.py b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/client.py new file mode 100644 index 00000000..5bbc3b2d --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/client.py @@ -0,0 +1,687 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
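Before the synchronous client that follows, a short usage sketch for the async client that ends above. Application default credentials are assumed; the project, region and policy ``id`` are placeholders, and ``id`` itself is an assumed field of ``AutoscalingPolicy`` (its message definition is not part of this hunk).

    import asyncio

    from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.async_client import (
        AutoscalingPolicyServiceAsyncClient,
    )
    from google.cloud.dataproc_v1beta2.types import autoscaling_policies


    async def main():
        client = AutoscalingPolicyServiceAsyncClient()
        policy = autoscaling_policies.AutoscalingPolicy(id="example-policy")  # `id` is assumed
        created = await client.create_autoscaling_policy(
            parent="projects/my-project/regions/us-central1",
            policy=policy,
        )
        print(created.name)


    asyncio.run(main())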
+# + +from collections import OrderedDict +import os +import re +from typing import Callable, Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service import pagers +from google.cloud.dataproc_v1beta2.types import autoscaling_policies + +from .transports.base import AutoscalingPolicyServiceTransport +from .transports.grpc import AutoscalingPolicyServiceGrpcTransport +from .transports.grpc_asyncio import AutoscalingPolicyServiceGrpcAsyncIOTransport + + +class AutoscalingPolicyServiceClientMeta(type): + """Metaclass for the AutoscalingPolicyService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[AutoscalingPolicyServiceTransport]] + _transport_registry["grpc"] = AutoscalingPolicyServiceGrpcTransport + _transport_registry["grpc_asyncio"] = AutoscalingPolicyServiceGrpcAsyncIOTransport + + def get_transport_class( + cls, label: str = None, + ) -> Type[AutoscalingPolicyServiceTransport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class AutoscalingPolicyServiceClient(metaclass=AutoscalingPolicyServiceClientMeta): + """The API interface for managing autoscaling policies in the + Cloud Dataproc API. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Convert api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "dataproc.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file.
+ args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AutoscalingPolicyServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @staticmethod + def autoscaling_policy_path( + project: str, location: str, autoscaling_policy: str, + ) -> str: + """Return a fully-qualified autoscaling_policy string.""" + return "projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}".format( + project=project, location=location, autoscaling_policy=autoscaling_policy, + ) + + @staticmethod + def parse_autoscaling_policy_path(path: str) -> Dict[str, str]: + """Parse a autoscaling_policy path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/autoscalingPolicies/(?P<autoscaling_policy>.+?)$", + path, + ) + return m.groupdict() if m else {} + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, AutoscalingPolicyServiceTransport] = None, + client_options: ClientOptions = None, + ) -> None: + """Instantiate the autoscaling policy service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.AutoscalingPolicyServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint, this is the default value for + the environment variable) and "auto" (auto switch to the default + mTLS endpoint if client SSL credentials is present). However, + the ``api_endpoint`` property takes precedence if provided. + (2) The ``client_cert_source`` property is used to provide client + SSL credentials for mutual TLS transport. If not provided, the + default SSL credentials will be used if present. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = ClientOptions.from_dict(client_options) + if client_options is None: + client_options = ClientOptions.ClientOptions() + + if client_options.api_endpoint is None: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") + if use_mtls_env == "never": + client_options.api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + has_client_cert_source = ( + client_options.client_cert_source is not None + or mtls.has_default_client_cert_source() + ) + client_options.api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT + if has_client_cert_source + else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS value.
Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, AutoscalingPolicyServiceTransport): + # transport is a AutoscalingPolicyServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=client_options.api_endpoint, + scopes=client_options.scopes, + api_mtls_endpoint=client_options.api_endpoint, + client_cert_source=client_options.client_cert_source, + quota_project_id=client_options.quota_project_id, + ) + + def create_autoscaling_policy( + self, + request: autoscaling_policies.CreateAutoscalingPolicyRequest = None, + *, + parent: str = None, + policy: autoscaling_policies.AutoscalingPolicy = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> autoscaling_policies.AutoscalingPolicy: + r"""Creates new autoscaling policy. + + Args: + request (:class:`~.autoscaling_policies.CreateAutoscalingPolicyRequest`): + The request object. A request to create an autoscaling + policy. + parent (:class:`str`): + Required. The "resource name" of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.create``, + the resource name has the following format: + ``projects/{project_id}/regions/{region}`` + + - For + ``projects.locations.autoscalingPolicies.create``, + the resource name has the following format: + ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + policy (:class:`~.autoscaling_policies.AutoscalingPolicy`): + Required. The autoscaling policy to + create. + This corresponds to the ``policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.autoscaling_policies.AutoscalingPolicy: + Describes an autoscaling policy for + Dataproc cluster autoscaler. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, policy]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a autoscaling_policies.CreateAutoscalingPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
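A sketch of how the endpoint resolution above plays out in practice; the regional hostname is a placeholder and application default credentials are assumed.

    import os

    from google.api_core.client_options import ClientOptions
    from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.client import (
        AutoscalingPolicyServiceClient,
    )

    # An explicit api_endpoint always takes precedence.
    client = AutoscalingPolicyServiceClient(
        client_options=ClientOptions(api_endpoint="us-central1-dataproc.googleapis.com"),
    )

    # Without api_endpoint, GOOGLE_API_USE_MTLS selects the host:
    # "never" -> DEFAULT_ENDPOINT, "always" -> DEFAULT_MTLS_ENDPOINT,
    # "auto"  -> the mTLS endpoint only when a client certificate is available.
    os.environ["GOOGLE_API_USE_MTLS"] = "never"
    print(AutoscalingPolicyServiceClient.DEFAULT_ENDPOINT)       # dataproc.googleapis.com
    print(AutoscalingPolicyServiceClient.DEFAULT_MTLS_ENDPOINT)  # dataproc.mtls.googleapis.com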
+ if not isinstance(request, autoscaling_policies.CreateAutoscalingPolicyRequest): + request = autoscaling_policies.CreateAutoscalingPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if policy is not None: + request.policy = policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.create_autoscaling_policy + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def update_autoscaling_policy( + self, + request: autoscaling_policies.UpdateAutoscalingPolicyRequest = None, + *, + policy: autoscaling_policies.AutoscalingPolicy = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> autoscaling_policies.AutoscalingPolicy: + r"""Updates (replaces) autoscaling policy. + + Disabled check for update_mask, because all updates will be full + replacements. + + Args: + request (:class:`~.autoscaling_policies.UpdateAutoscalingPolicyRequest`): + The request object. A request to update an autoscaling + policy. + policy (:class:`~.autoscaling_policies.AutoscalingPolicy`): + Required. The updated autoscaling + policy. + This corresponds to the ``policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.autoscaling_policies.AutoscalingPolicy: + Describes an autoscaling policy for + Dataproc cluster autoscaler. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([policy]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a autoscaling_policies.UpdateAutoscalingPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, autoscaling_policies.UpdateAutoscalingPolicyRequest): + request = autoscaling_policies.UpdateAutoscalingPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if policy is not None: + request.policy = policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.update_autoscaling_policy + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("policy.name", request.policy.name),) + ), + ) + + # Send the request. 
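The flattened-parameter check above means each RPC can be called either with keyword fields or with a request object, but not both. A sketch with placeholder project/region values; ``id`` is an assumed field of ``AutoscalingPolicy``, and application default credentials are assumed.

    from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.client import (
        AutoscalingPolicyServiceClient,
    )
    from google.cloud.dataproc_v1beta2.types import autoscaling_policies

    client = AutoscalingPolicyServiceClient()  # assumes application default credentials
    parent = "projects/my-project/regions/us-central1"  # placeholder
    policy = autoscaling_policies.AutoscalingPolicy(id="example-policy")  # `id` is assumed

    # Style 1: flattened arguments.
    created = client.create_autoscaling_policy(parent=parent, policy=policy)

    # Style 2: an explicit request object.
    request = autoscaling_policies.CreateAutoscalingPolicyRequest(parent=parent, policy=policy)
    created = client.create_autoscaling_policy(request=request)

    # Mixing both forms trips the sanity check above.
    try:
        client.create_autoscaling_policy(request=request, parent=parent)
    except ValueError as exc:
        print(exc)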
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_autoscaling_policy( + self, + request: autoscaling_policies.GetAutoscalingPolicyRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> autoscaling_policies.AutoscalingPolicy: + r"""Retrieves autoscaling policy. + + Args: + request (:class:`~.autoscaling_policies.GetAutoscalingPolicyRequest`): + The request object. A request to fetch an autoscaling + policy. + name (:class:`str`): + Required. The "resource name" of the autoscaling policy, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.get``, the + resource name of the policy has the following format: + ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` + + - For ``projects.locations.autoscalingPolicies.get``, + the resource name of the policy has the following + format: + ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.autoscaling_policies.AutoscalingPolicy: + Describes an autoscaling policy for + Dataproc cluster autoscaler. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a autoscaling_policies.GetAutoscalingPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, autoscaling_policies.GetAutoscalingPolicyRequest): + request = autoscaling_policies.GetAutoscalingPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_autoscaling_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_autoscaling_policies( + self, + request: autoscaling_policies.ListAutoscalingPoliciesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListAutoscalingPoliciesPager: + r"""Lists autoscaling policies in the project. 
+ + Args: + request (:class:`~.autoscaling_policies.ListAutoscalingPoliciesRequest`): + The request object. A request to list autoscaling + policies in a project. + parent (:class:`str`): + Required. The "resource name" of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.list``, + the resource name of the region has the following + format: ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.autoscalingPolicies.list``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListAutoscalingPoliciesPager: + A response to a request to list + autoscaling policies in a project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a autoscaling_policies.ListAutoscalingPoliciesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, autoscaling_policies.ListAutoscalingPoliciesRequest): + request = autoscaling_policies.ListAutoscalingPoliciesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.list_autoscaling_policies + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListAutoscalingPoliciesPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_autoscaling_policy( + self, + request: autoscaling_policies.DeleteAutoscalingPolicyRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes an autoscaling policy. It is an error to + delete an autoscaling policy that is in use by one or + more clusters. + + Args: + request (:class:`~.autoscaling_policies.DeleteAutoscalingPolicyRequest`): + The request object. A request to delete an autoscaling + policy. 
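The pager returned by the synchronous ``list_autoscaling_policies`` hides the ``page_token`` plumbing: iterating it yields ``AutoscalingPolicy`` items and fetches further pages on demand. A sketch with placeholder values, assuming application default credentials.

    from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.client import (
        AutoscalingPolicyServiceClient,
    )

    client = AutoscalingPolicyServiceClient()
    for policy in client.list_autoscaling_policies(
        parent="projects/my-project/regions/us-central1"  # placeholder
    ):
        print(policy.name)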
+ Autoscaling policies in use by one or more clusters will + not be deleted. + name (:class:`str`): + Required. The "resource name" of the autoscaling policy, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.delete``, + the resource name of the policy has the following + format: + ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` + + - For + ``projects.locations.autoscalingPolicies.delete``, + the resource name of the policy has the following + format: + ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a autoscaling_policies.DeleteAutoscalingPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, autoscaling_policies.DeleteAutoscalingPolicyRequest): + request = autoscaling_policies.DeleteAutoscalingPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.delete_autoscaling_policy + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + +try: + _client_info = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-dataproc",).version, + ) +except pkg_resources.DistributionNotFound: + _client_info = gapic_v1.client_info.ClientInfo() + + +__all__ = ("AutoscalingPolicyServiceClient",) diff --git a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/pagers.py b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/pagers.py new file mode 100644 index 00000000..4a9a6942 --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/pagers.py @@ -0,0 +1,152 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
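The synchronous client defined above also carries the resource-path helpers and the service-account constructors; a sketch in which every value, including the key file path, is a placeholder.

    from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.client import (
        AutoscalingPolicyServiceClient,
    )

    name = AutoscalingPolicyServiceClient.autoscaling_policy_path(
        "my-project", "us-central1", "example-policy"
    )
    # -> "projects/my-project/locations/us-central1/autoscalingPolicies/example-policy"
    print(AutoscalingPolicyServiceClient.parse_autoscaling_policy_path(name))
    # -> {"project": "my-project", "location": "us-central1",
    #     "autoscaling_policy": "example-policy"}

    # Construct a client from a service account key (hypothetical path).
    client = AutoscalingPolicyServiceClient.from_service_account_json("sa-key.json")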
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple + +from google.cloud.dataproc_v1beta2.types import autoscaling_policies + + +class ListAutoscalingPoliciesPager: + """A pager for iterating through ``list_autoscaling_policies`` requests. + + This class thinly wraps an initial + :class:`~.autoscaling_policies.ListAutoscalingPoliciesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``policies`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListAutoscalingPolicies`` requests and continue to iterate + through the ``policies`` field on the + corresponding responses. + + All the usual :class:`~.autoscaling_policies.ListAutoscalingPoliciesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., autoscaling_policies.ListAutoscalingPoliciesResponse], + request: autoscaling_policies.ListAutoscalingPoliciesRequest, + response: autoscaling_policies.ListAutoscalingPoliciesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.autoscaling_policies.ListAutoscalingPoliciesRequest`): + The initial request object. + response (:class:`~.autoscaling_policies.ListAutoscalingPoliciesResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = autoscaling_policies.ListAutoscalingPoliciesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[autoscaling_policies.ListAutoscalingPoliciesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[autoscaling_policies.AutoscalingPolicy]: + for page in self.pages: + yield from page.policies + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListAutoscalingPoliciesAsyncPager: + """A pager for iterating through ``list_autoscaling_policies`` requests. + + This class thinly wraps an initial + :class:`~.autoscaling_policies.ListAutoscalingPoliciesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``policies`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListAutoscalingPolicies`` requests and continue to iterate + through the ``policies`` field on the + corresponding responses. + + All the usual :class:`~.autoscaling_policies.ListAutoscalingPoliciesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
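Beyond item iteration, the pager's ``pages`` property exposes the raw ``ListAutoscalingPoliciesResponse`` pages, and unknown attributes proxy to the most recent response. A sketch with a placeholder parent, assuming application default credentials.

    from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.client import (
        AutoscalingPolicyServiceClient,
    )

    client = AutoscalingPolicyServiceClient()
    pager = client.list_autoscaling_policies(
        parent="projects/my-project/regions/us-central1"  # placeholder
    )
    for page in pager.pages:
        # `next_page_token` is looked up on the latest response via __getattr__.
        print(len(page.policies), pager.next_page_token)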
+ """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[autoscaling_policies.ListAutoscalingPoliciesResponse] + ], + request: autoscaling_policies.ListAutoscalingPoliciesRequest, + response: autoscaling_policies.ListAutoscalingPoliciesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.autoscaling_policies.ListAutoscalingPoliciesRequest`): + The initial request object. + response (:class:`~.autoscaling_policies.ListAutoscalingPoliciesResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = autoscaling_policies.ListAutoscalingPoliciesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[autoscaling_policies.ListAutoscalingPoliciesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[autoscaling_policies.AutoscalingPolicy]: + async def async_generator(): + async for page in self.pages: + for response in page.policies: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/__init__.py b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/__init__.py new file mode 100644 index 00000000..8a17b892 --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/__init__.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import AutoscalingPolicyServiceTransport +from .grpc import AutoscalingPolicyServiceGrpcTransport +from .grpc_asyncio import AutoscalingPolicyServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. 
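The async variant above is consumed with ``async for``; a sketch with a placeholder parent, assuming application default credentials.

    import asyncio

    from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.async_client import (
        AutoscalingPolicyServiceAsyncClient,
    )


    async def main():
        client = AutoscalingPolicyServiceAsyncClient()
        pager = await client.list_autoscaling_policies(
            parent="projects/my-project/regions/us-central1"  # placeholder
        )
        async for policy in pager:  # resolves additional pages transparently
            print(policy.name)


    asyncio.run(main())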
+_transport_registry = ( + OrderedDict() +) # type: Dict[str, Type[AutoscalingPolicyServiceTransport]] +_transport_registry["grpc"] = AutoscalingPolicyServiceGrpcTransport +_transport_registry["grpc_asyncio"] = AutoscalingPolicyServiceGrpcAsyncIOTransport + + +__all__ = ( + "AutoscalingPolicyServiceTransport", + "AutoscalingPolicyServiceGrpcTransport", + "AutoscalingPolicyServiceGrpcAsyncIOTransport", +) diff --git a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/base.py b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/base.py new file mode 100644 index 00000000..f4da8ccb --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/base.py @@ -0,0 +1,211 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import abc +import typing +import pkg_resources + +from google import auth +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.dataproc_v1beta2.types import autoscaling_policies +from google.protobuf import empty_pb2 as empty # type: ignore + + +try: + _client_info = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-dataproc",).version, + ) +except pkg_resources.DistributionNotFound: + _client_info = gapic_v1.client_info.ClientInfo() + + +class AutoscalingPolicyServiceTransport(abc.ABC): + """Abstract transport class for AutoscalingPolicyService.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + def __init__( + self, + *, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scope (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + # If no credentials are provided, then determine the appropriate + # defaults. 
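This registry backs the client's ``transport`` argument: a string label selects a transport class, and omitting it falls back to the first entry (``grpc``). A sketch, assuming application default credentials.

    from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.client import (
        AutoscalingPolicyServiceClient,
    )

    print(AutoscalingPolicyServiceClient.get_transport_class())               # grpc (default)
    print(AutoscalingPolicyServiceClient.get_transport_class("grpc_asyncio"))

    # Equivalent to the default selection:
    client = AutoscalingPolicyServiceClient(transport="grpc")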
+ if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=scopes, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = auth.default( + scopes=scopes, quota_project_id=quota_project_id + ) + + # Save the credentials. + self._credentials = credentials + + # Lifted into its own function so it can be stubbed out during tests. + self._prep_wrapped_messages() + + def _prep_wrapped_messages(self): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_autoscaling_policy: gapic_v1.method.wrap_method( + self.create_autoscaling_policy, + default_timeout=600.0, + client_info=_client_info, + ), + self.update_autoscaling_policy: gapic_v1.method.wrap_method( + self.update_autoscaling_policy, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=_client_info, + ), + self.get_autoscaling_policy: gapic_v1.method.wrap_method( + self.get_autoscaling_policy, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=_client_info, + ), + self.list_autoscaling_policies: gapic_v1.method.wrap_method( + self.list_autoscaling_policies, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=_client_info, + ), + self.delete_autoscaling_policy: gapic_v1.method.wrap_method( + self.delete_autoscaling_policy, + default_timeout=600.0, + client_info=_client_info, + ), + } + + @property + def create_autoscaling_policy( + self, + ) -> typing.Callable[ + [autoscaling_policies.CreateAutoscalingPolicyRequest], + typing.Union[ + autoscaling_policies.AutoscalingPolicy, + typing.Awaitable[autoscaling_policies.AutoscalingPolicy], + ], + ]: + raise NotImplementedError() + + @property + def update_autoscaling_policy( + self, + ) -> typing.Callable[ + [autoscaling_policies.UpdateAutoscalingPolicyRequest], + typing.Union[ + autoscaling_policies.AutoscalingPolicy, + typing.Awaitable[autoscaling_policies.AutoscalingPolicy], + ], + ]: + raise NotImplementedError() + + @property + def get_autoscaling_policy( + self, + ) -> typing.Callable[ + [autoscaling_policies.GetAutoscalingPolicyRequest], + typing.Union[ + autoscaling_policies.AutoscalingPolicy, + typing.Awaitable[autoscaling_policies.AutoscalingPolicy], + ], + ]: + raise NotImplementedError() + + @property + def list_autoscaling_policies( + self, + ) -> typing.Callable[ + [autoscaling_policies.ListAutoscalingPoliciesRequest], + typing.Union[ + autoscaling_policies.ListAutoscalingPoliciesResponse, + typing.Awaitable[autoscaling_policies.ListAutoscalingPoliciesResponse], + ], + ]: + raise NotImplementedError() + + @property + def delete_autoscaling_policy( + self, + ) -> typing.Callable[ + [autoscaling_policies.DeleteAutoscalingPolicyRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + +__all__ = ("AutoscalingPolicyServiceTransport",) diff --git 
a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc.py b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc.py new file mode 100644 index 00000000..f1b5b894 --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc.py @@ -0,0 +1,356 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + + +import grpc # type: ignore + +from google.cloud.dataproc_v1beta2.types import autoscaling_policies +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import AutoscalingPolicyServiceTransport + + +class AutoscalingPolicyServiceGrpcTransport(AutoscalingPolicyServiceTransport): + """gRPC backend transport for AutoscalingPolicyService. + + The API interface for managing autoscaling policies in the + Cloud Dataproc API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If + provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. 
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A + callback to provide client SSL certificate bytes and private key + bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` + is None. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + self._stubs = {} # type: Dict[str, Callable] + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + @classmethod + def create_channel( + cls, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + address (Optionsl[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Sanity check: Only create a new channel if we do not already + # have one. + if not hasattr(self, "_grpc_channel"): + self._grpc_channel = self.create_channel( + self._host, credentials=self._credentials, + ) + + # Return the channel from cache. + return self._grpc_channel + + @property + def create_autoscaling_policy( + self, + ) -> Callable[ + [autoscaling_policies.CreateAutoscalingPolicyRequest], + autoscaling_policies.AutoscalingPolicy, + ]: + r"""Return a callable for the create autoscaling policy method over gRPC. + + Creates new autoscaling policy. + + Returns: + Callable[[~.CreateAutoscalingPolicyRequest], + ~.AutoscalingPolicy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_autoscaling_policy" not in self._stubs: + self._stubs["create_autoscaling_policy"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/CreateAutoscalingPolicy", + request_serializer=autoscaling_policies.CreateAutoscalingPolicyRequest.serialize, + response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, + ) + return self._stubs["create_autoscaling_policy"] + + @property + def update_autoscaling_policy( + self, + ) -> Callable[ + [autoscaling_policies.UpdateAutoscalingPolicyRequest], + autoscaling_policies.AutoscalingPolicy, + ]: + r"""Return a callable for the update autoscaling policy method over gRPC. + + Updates (replaces) autoscaling policy. + + Disabled check for update_mask, because all updates will be full + replacements. + + Returns: + Callable[[~.UpdateAutoscalingPolicyRequest], + ~.AutoscalingPolicy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_autoscaling_policy" not in self._stubs: + self._stubs["update_autoscaling_policy"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/UpdateAutoscalingPolicy", + request_serializer=autoscaling_policies.UpdateAutoscalingPolicyRequest.serialize, + response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, + ) + return self._stubs["update_autoscaling_policy"] + + @property + def get_autoscaling_policy( + self, + ) -> Callable[ + [autoscaling_policies.GetAutoscalingPolicyRequest], + autoscaling_policies.AutoscalingPolicy, + ]: + r"""Return a callable for the get autoscaling policy method over gRPC. + + Retrieves autoscaling policy. + + Returns: + Callable[[~.GetAutoscalingPolicyRequest], + ~.AutoscalingPolicy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
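``create_channel`` and the cached ``grpc_channel`` also allow wiring a transport up explicitly and handing it to the client; per the constructor checks earlier in this file, credentials and scopes must then not be passed separately. A sketch, assuming application default credentials are available for the channel.

    from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.client import (
        AutoscalingPolicyServiceClient,
    )
    from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.transports import (
        AutoscalingPolicyServiceGrpcTransport,
    )

    channel = AutoscalingPolicyServiceGrpcTransport.create_channel(
        "dataproc.googleapis.com:443"
    )
    transport = AutoscalingPolicyServiceGrpcTransport(channel=channel)
    client = AutoscalingPolicyServiceClient(transport=transport)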
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_autoscaling_policy" not in self._stubs: + self._stubs["get_autoscaling_policy"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/GetAutoscalingPolicy", + request_serializer=autoscaling_policies.GetAutoscalingPolicyRequest.serialize, + response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, + ) + return self._stubs["get_autoscaling_policy"] + + @property + def list_autoscaling_policies( + self, + ) -> Callable[ + [autoscaling_policies.ListAutoscalingPoliciesRequest], + autoscaling_policies.ListAutoscalingPoliciesResponse, + ]: + r"""Return a callable for the list autoscaling policies method over gRPC. + + Lists autoscaling policies in the project. + + Returns: + Callable[[~.ListAutoscalingPoliciesRequest], + ~.ListAutoscalingPoliciesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_autoscaling_policies" not in self._stubs: + self._stubs["list_autoscaling_policies"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/ListAutoscalingPolicies", + request_serializer=autoscaling_policies.ListAutoscalingPoliciesRequest.serialize, + response_deserializer=autoscaling_policies.ListAutoscalingPoliciesResponse.deserialize, + ) + return self._stubs["list_autoscaling_policies"] + + @property + def delete_autoscaling_policy( + self, + ) -> Callable[[autoscaling_policies.DeleteAutoscalingPolicyRequest], empty.Empty]: + r"""Return a callable for the delete autoscaling policy method over gRPC. + + Deletes an autoscaling policy. It is an error to + delete an autoscaling policy that is in use by one or + more clusters. + + Returns: + Callable[[~.DeleteAutoscalingPolicyRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_autoscaling_policy" not in self._stubs: + self._stubs["delete_autoscaling_policy"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/DeleteAutoscalingPolicy", + request_serializer=autoscaling_policies.DeleteAutoscalingPolicyRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_autoscaling_policy"] + + +__all__ = ("AutoscalingPolicyServiceGrpcTransport",) diff --git a/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc_asyncio.py b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc_asyncio.py new file mode 100644 index 00000000..fa17bb26 --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/autoscaling_policy_service/transports/grpc_asyncio.py @@ -0,0 +1,351 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers_async # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.dataproc_v1beta2.types import autoscaling_policies +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import AutoscalingPolicyServiceTransport +from .grpc import AutoscalingPolicyServiceGrpcTransport + + +class AutoscalingPolicyServiceGrpcAsyncIOTransport(AutoscalingPolicyServiceTransport): + """gRPC AsyncIO backend transport for AutoscalingPolicyService. + + The API interface for managing autoscaling policies in the + Cloud Dataproc API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + address (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + ) -> None: + """Instantiate the transport. 
+ + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If + provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A + callback to provide client SSL certificate bytes and private key + bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` + is None. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + self._stubs = {} + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Sanity check: Only create a new channel if we do not already + # have one. + if not hasattr(self, "_grpc_channel"): + self._grpc_channel = self.create_channel( + self._host, credentials=self._credentials, + ) + + # Return the channel from cache. 
+ return self._grpc_channel + + @property + def create_autoscaling_policy( + self, + ) -> Callable[ + [autoscaling_policies.CreateAutoscalingPolicyRequest], + Awaitable[autoscaling_policies.AutoscalingPolicy], + ]: + r"""Return a callable for the create autoscaling policy method over gRPC. + + Creates new autoscaling policy. + + Returns: + Callable[[~.CreateAutoscalingPolicyRequest], + Awaitable[~.AutoscalingPolicy]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_autoscaling_policy" not in self._stubs: + self._stubs["create_autoscaling_policy"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/CreateAutoscalingPolicy", + request_serializer=autoscaling_policies.CreateAutoscalingPolicyRequest.serialize, + response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, + ) + return self._stubs["create_autoscaling_policy"] + + @property + def update_autoscaling_policy( + self, + ) -> Callable[ + [autoscaling_policies.UpdateAutoscalingPolicyRequest], + Awaitable[autoscaling_policies.AutoscalingPolicy], + ]: + r"""Return a callable for the update autoscaling policy method over gRPC. + + Updates (replaces) autoscaling policy. + + Disabled check for update_mask, because all updates will be full + replacements. + + Returns: + Callable[[~.UpdateAutoscalingPolicyRequest], + Awaitable[~.AutoscalingPolicy]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_autoscaling_policy" not in self._stubs: + self._stubs["update_autoscaling_policy"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/UpdateAutoscalingPolicy", + request_serializer=autoscaling_policies.UpdateAutoscalingPolicyRequest.serialize, + response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, + ) + return self._stubs["update_autoscaling_policy"] + + @property + def get_autoscaling_policy( + self, + ) -> Callable[ + [autoscaling_policies.GetAutoscalingPolicyRequest], + Awaitable[autoscaling_policies.AutoscalingPolicy], + ]: + r"""Return a callable for the get autoscaling policy method over gRPC. + + Retrieves autoscaling policy. + + Returns: + Callable[[~.GetAutoscalingPolicyRequest], + Awaitable[~.AutoscalingPolicy]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_autoscaling_policy" not in self._stubs: + self._stubs["get_autoscaling_policy"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/GetAutoscalingPolicy", + request_serializer=autoscaling_policies.GetAutoscalingPolicyRequest.serialize, + response_deserializer=autoscaling_policies.AutoscalingPolicy.deserialize, + ) + return self._stubs["get_autoscaling_policy"] + + @property + def list_autoscaling_policies( + self, + ) -> Callable[ + [autoscaling_policies.ListAutoscalingPoliciesRequest], + Awaitable[autoscaling_policies.ListAutoscalingPoliciesResponse], + ]: + r"""Return a callable for the list autoscaling policies method over gRPC. + + Lists autoscaling policies in the project. + + Returns: + Callable[[~.ListAutoscalingPoliciesRequest], + Awaitable[~.ListAutoscalingPoliciesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_autoscaling_policies" not in self._stubs: + self._stubs["list_autoscaling_policies"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/ListAutoscalingPolicies", + request_serializer=autoscaling_policies.ListAutoscalingPoliciesRequest.serialize, + response_deserializer=autoscaling_policies.ListAutoscalingPoliciesResponse.deserialize, + ) + return self._stubs["list_autoscaling_policies"] + + @property + def delete_autoscaling_policy( + self, + ) -> Callable[ + [autoscaling_policies.DeleteAutoscalingPolicyRequest], Awaitable[empty.Empty] + ]: + r"""Return a callable for the delete autoscaling policy method over gRPC. + + Deletes an autoscaling policy. It is an error to + delete an autoscaling policy that is in use by one or + more clusters. + + Returns: + Callable[[~.DeleteAutoscalingPolicyRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_autoscaling_policy" not in self._stubs: + self._stubs["delete_autoscaling_policy"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.AutoscalingPolicyService/DeleteAutoscalingPolicy", + request_serializer=autoscaling_policies.DeleteAutoscalingPolicyRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_autoscaling_policy"] + + +__all__ = ("AutoscalingPolicyServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/dataproc_v1beta2/services/cluster_controller/__init__.py b/google/cloud/dataproc_v1beta2/services/cluster_controller/__init__.py new file mode 100644 index 00000000..99ce2997 --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/cluster_controller/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .client import ClusterControllerClient +from .async_client import ClusterControllerAsyncClient + +__all__ = ( + "ClusterControllerClient", + "ClusterControllerAsyncClient", +) diff --git a/google/cloud/dataproc_v1beta2/services/cluster_controller/async_client.py b/google/cloud/dataproc_v1beta2/services/cluster_controller/async_client.py new file mode 100644 index 00000000..9a5af0d9 --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/cluster_controller/async_client.py @@ -0,0 +1,855 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation +from google.api_core import operation_async +from google.cloud.dataproc_v1beta2.services.cluster_controller import pagers +from google.cloud.dataproc_v1beta2.types import clusters +from google.cloud.dataproc_v1beta2.types import operations +from google.protobuf import empty_pb2 as empty # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore + +from .transports.base import ClusterControllerTransport +from .transports.grpc_asyncio import ClusterControllerGrpcAsyncIOTransport +from .client import ClusterControllerClient + + +class ClusterControllerAsyncClient: + """The ClusterControllerService provides methods to manage + clusters of Compute Engine instances. + """ + + _client: ClusterControllerClient + + DEFAULT_ENDPOINT = ClusterControllerClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = ClusterControllerClient.DEFAULT_MTLS_ENDPOINT + + from_service_account_file = ClusterControllerClient.from_service_account_file + from_service_account_json = from_service_account_file + + get_transport_class = functools.partial( + type(ClusterControllerClient).get_transport_class, type(ClusterControllerClient) + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, ClusterControllerTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + ) -> None: + """Instantiate the cluster controller client. 
+ + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.ClusterControllerTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint, this is the default value for + the environment variable) and "auto" (auto switch to the default + mTLS endpoint if client SSL credentials is present). However, + the ``api_endpoint`` property takes precedence if provided. + (2) The ``client_cert_source`` property is used to provide client + SSL credentials for mutual TLS transport. If not provided, the + default SSL credentials will be used if present. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + + self._client = ClusterControllerClient( + credentials=credentials, transport=transport, client_options=client_options, + ) + + async def create_cluster( + self, + request: clusters.CreateClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster: clusters.Cluster = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Args: + request (:class:`~.clusters.CreateClusterRequest`): + The request object. A request to create a cluster. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (:class:`~.clusters.Cluster`): + Required. The cluster to create. + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.clusters.Cluster``: Describes the identifying + information, config, and status of a cluster of Compute + Engine instances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
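+ # Illustrative call styles (hypothetical project/region/cluster values): callers
+ # may pass a fully populated request object, e.g.
+ #     create_cluster(request=clusters.CreateClusterRequest(project_id="my-project", region="us-central1", cluster=my_cluster))
+ # or the flattened fields, e.g.
+ #     create_cluster(project_id="my-project", region="us-central1", cluster=my_cluster)
+ # but not both at once; mixing the two styles raises ValueError below.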
+ if request is not None and any([project_id, region, cluster]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = clusters.CreateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster is not None: + request.cluster = cluster + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_cluster, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=300.0, + client_info=_client_info, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + clusters.Cluster, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + async def update_cluster( + self, + request: clusters.UpdateClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + cluster: clusters.Cluster = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Args: + request (:class:`~.clusters.UpdateClusterRequest`): + The request object. A request to update a cluster. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project the cluster belongs to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (:class:`str`): + Required. The cluster name. + This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (:class:`~.clusters.Cluster`): + Required. The changes to the cluster. + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`~.field_mask.FieldMask`): + Required. Specifies the path, relative to ``Cluster``, + of the field to update. 
For example, to change the + number of workers in a cluster to 5, the ``update_mask`` + parameter would be specified as + ``config.worker_config.num_instances``, and the + ``PATCH`` request body would specify the new value, as + follows: + + :: + + { + "config":{ + "workerConfig":{ + "numInstances":"5" + } + } + } + + Similarly, to change the number of preemptible workers + in a cluster to 5, the ``update_mask`` parameter would + be ``config.secondary_worker_config.num_instances``, and + the ``PATCH`` request body would be set as follows: + + :: + + { + "config":{ + "secondaryWorkerConfig":{ + "numInstances":"5" + } + } + } + + Note: currently only the following fields can be + updated: + + .. raw:: html + + + + + + + + + + + + + + + + + + + + + + + + + + +
<table> + <tr> + <td>Mask</td> + <td>Purpose</td> + </tr>
<tr> + <td>labels</td> + <td>Updates labels</td> + </tr>
<tr> + <td>config.worker_config.num_instances</td> + <td>Resize primary worker group</td> + </tr>
<tr> + <td>config.secondary_worker_config.num_instances</td> + <td>Resize secondary worker group</td> + </tr>
<tr> + <td>config.lifecycle_config.auto_delete_ttl</td> + <td>Reset MAX TTL duration</td> + </tr>
<tr> + <td>config.lifecycle_config.auto_delete_time</td> + <td>Update MAX TTL deletion timestamp</td> + </tr>
<tr> + <td>config.lifecycle_config.idle_delete_ttl</td> + <td>Update Idle TTL duration</td> + </tr>
<tr> + <td>config.autoscaling_config.policy_uri</td> + <td>Use, stop using, or change autoscaling policies</td> + </tr> + </table>
+ This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.clusters.Cluster``: Describes the identifying + information, config, and status of a cluster of Compute + Engine instances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any( + [project_id, region, cluster_name, cluster, update_mask] + ): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = clusters.UpdateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + if cluster is not None: + request.cluster = cluster + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_cluster, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=300.0, + client_info=_client_info, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + clusters.Cluster, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + async def delete_cluster( + self, + request: clusters.DeleteClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Args: + request (:class:`~.clusters.DeleteClusterRequest`): + The request object. A request to delete a cluster. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (:class:`str`): + Required. The cluster name. 
+ This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, region, cluster_name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = clusters.DeleteClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_cluster, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=300.0, + client_info=_client_info, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty.Empty, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_cluster( + self, + request: clusters.GetClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> clusters.Cluster: + r"""Gets the resource representation for a cluster in a + project. + + Args: + request (:class:`~.clusters.GetClusterRequest`): + The request object. Request to get the resource + representation for a cluster in a project. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (:class:`str`): + Required. The cluster name. 
+ This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.clusters.Cluster: + Describes the identifying + information, config, and status of a + cluster of Compute Engine instances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, region, cluster_name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = clusters.GetClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_cluster, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded, + ), + ), + default_timeout=300.0, + client_info=_client_info, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_clusters( + self, + request: clusters.ListClustersRequest = None, + *, + project_id: str = None, + region: str = None, + filter: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListClustersAsyncPager: + r"""Lists all regions/{region}/clusters in a project + alphabetically. + + Args: + request (:class:`~.clusters.ListClustersRequest`): + The request object. A request to list the clusters in a + project. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + filter (:class:`str`): + Optional. A filter constraining the clusters to list. + Filters are case-sensitive and have the following + syntax: + + field = value [AND [field = value]] ... + + where **field** is one of ``status.state``, + ``clusterName``, or ``labels.[KEY]``, and ``[KEY]`` is a + label key. **value** can be ``*`` to match all values. + ``status.state`` can be one of the following: + ``ACTIVE``, ``INACTIVE``, ``CREATING``, ``RUNNING``, + ``ERROR``, ``DELETING``, or ``UPDATING``. ``ACTIVE`` + contains the ``CREATING``, ``UPDATING``, and ``RUNNING`` + states. ``INACTIVE`` contains the ``DELETING`` and + ``ERROR`` states. 
``clusterName`` is the name of the + cluster provided at creation time. Only the logical + ``AND`` operator is supported; space-separated items are + treated as having an implicit ``AND`` operator. + + Example filter: + + status.state = ACTIVE AND clusterName = mycluster AND + labels.env = staging AND labels.starred = \* + This corresponds to the ``filter`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListClustersAsyncPager: + The list of all clusters in a + project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, region, filter]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = clusters.ListClustersRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if filter is not None: + request.filter = filter + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_clusters, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded, + ), + ), + default_timeout=300.0, + client_info=_client_info, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListClustersAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def diagnose_cluster( + self, + request: clusters.DiagnoseClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Gets cluster diagnostic information. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + After the operation completes, + [Operation.response][google.longrunning.Operation.response] + contains [Empty][google.protobuf.Empty]. + + Args: + request (:class:`~.clusters.DiagnoseClusterRequest`): + The request object. A request to collect cluster + diagnostic information. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. 
+ This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (:class:`str`): + Required. The cluster name. + This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, region, cluster_name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = clusters.DiagnoseClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.diagnose_cluster, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=300.0, + client_info=_client_info, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty.Empty, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + +try: + _client_info = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-dataproc",).version, + ) +except pkg_resources.DistributionNotFound: + _client_info = gapic_v1.client_info.ClientInfo() + + +__all__ = ("ClusterControllerAsyncClient",) diff --git a/google/cloud/dataproc_v1beta2/services/cluster_controller/client.py b/google/cloud/dataproc_v1beta2/services/cluster_controller/client.py new file mode 100644 index 00000000..341e9622 --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/cluster_controller/client.py @@ -0,0 +1,951 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +import os +import re +from typing import Callable, Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation +from google.api_core import operation_async +from google.cloud.dataproc_v1beta2.services.cluster_controller import pagers +from google.cloud.dataproc_v1beta2.types import clusters +from google.cloud.dataproc_v1beta2.types import operations +from google.protobuf import empty_pb2 as empty # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore + +from .transports.base import ClusterControllerTransport +from .transports.grpc import ClusterControllerGrpcTransport +from .transports.grpc_asyncio import ClusterControllerGrpcAsyncIOTransport + + +class ClusterControllerClientMeta(type): + """Metaclass for the ClusterController client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[ClusterControllerTransport]] + _transport_registry["grpc"] = ClusterControllerGrpcTransport + _transport_registry["grpc_asyncio"] = ClusterControllerGrpcAsyncIOTransport + + def get_transport_class( + cls, label: str = None, + ) -> Type[ClusterControllerTransport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class ClusterControllerClient(metaclass=ClusterControllerClientMeta): + """The ClusterControllerService provides methods to manage + clusters of Compute Engine instances. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Convert api endpoint to mTLS endpoint. + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "dataproc.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + {@api.name}: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, ClusterControllerTransport] = None, + client_options: ClientOptions = None, + ) -> None: + """Instantiate the cluster controller client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.ClusterControllerTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint, this is the default value for + the environment variable) and "auto" (auto switch to the default + mTLS endpoint if client SSL credentials is present). However, + the ``api_endpoint`` property takes precedence if provided. + (2) The ``client_cert_source`` property is used to provide client + SSL credentials for mutual TLS transport. If not provided, the + default SSL credentials will be used if present. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = ClientOptions.from_dict(client_options) + if client_options is None: + client_options = ClientOptions.ClientOptions() + + if client_options.api_endpoint is None: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") + if use_mtls_env == "never": + client_options.api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + has_client_cert_source = ( + client_options.client_cert_source is not None + or mtls.has_default_client_cert_source() + ) + client_options.api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT + if has_client_cert_source + else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS value. 
Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, ClusterControllerTransport): + # transport is a ClusterControllerTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=client_options.api_endpoint, + scopes=client_options.scopes, + api_mtls_endpoint=client_options.api_endpoint, + client_cert_source=client_options.client_cert_source, + quota_project_id=client_options.quota_project_id, + ) + + def create_cluster( + self, + request: clusters.CreateClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster: clusters.Cluster = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Creates a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Args: + request (:class:`~.clusters.CreateClusterRequest`): + The request object. A request to create a cluster. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (:class:`~.clusters.Cluster`): + Required. The cluster to create. + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.clusters.Cluster``: Describes the identifying + information, config, and status of a cluster of Compute + Engine instances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, cluster]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a clusters.CreateClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
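+ # Illustrative (hypothetical values): because the constructor below is a
+ # proto-plus message, a plain dict such as
+ #     {"project_id": "my-project", "region": "us-central1", "cluster": {"cluster_name": "my-cluster"}}
+ # is also accepted as ``request`` and coerced here; an existing
+ # clusters.CreateClusterRequest instance is used as-is rather than copied.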
+ if not isinstance(request, clusters.CreateClusterRequest): + request = clusters.CreateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster is not None: + request.cluster = cluster + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_cluster] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + clusters.Cluster, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + def update_cluster( + self, + request: clusters.UpdateClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + cluster: clusters.Cluster = None, + update_mask: field_mask.FieldMask = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Updates a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Args: + request (:class:`~.clusters.UpdateClusterRequest`): + The request object. A request to update a cluster. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project the cluster belongs to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (:class:`str`): + Required. The cluster name. + This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (:class:`~.clusters.Cluster`): + Required. The changes to the cluster. + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`~.field_mask.FieldMask`): + Required. Specifies the path, relative to ``Cluster``, + of the field to update. For example, to change the + number of workers in a cluster to 5, the ``update_mask`` + parameter would be specified as + ``config.worker_config.num_instances``, and the + ``PATCH`` request body would specify the new value, as + follows: + + :: + + { + "config":{ + "workerConfig":{ + "numInstances":"5" + } + } + } + + Similarly, to change the number of preemptible workers + in a cluster to 5, the ``update_mask`` parameter would + be ``config.secondary_worker_config.num_instances``, and + the ``PATCH`` request body would be set as follows: + + :: + + { + "config":{ + "secondaryWorkerConfig":{ + "numInstances":"5" + } + } + } + + Note: currently only the following fields can be + updated: + + .. raw:: html + + + + + + + + + + + + + + + + + + + + + + + + + + +
                     =============================================  ================================================
                     Mask                                           Purpose
                     =============================================  ================================================
                     labels                                         Updates labels
                     config.worker_config.num_instances             Resize primary worker group
                     config.secondary_worker_config.num_instances   Resize secondary worker group
                     config.lifecycle_config.auto_delete_ttl        Reset MAX TTL duration
                     config.lifecycle_config.auto_delete_time       Update MAX TTL deletion timestamp
                     config.lifecycle_config.idle_delete_ttl        Update Idle TTL duration
                     config.autoscaling_config.policy_uri           Use, stop using, or change autoscaling policies
                     =============================================  ================================================
+ This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.clusters.Cluster``: Describes the identifying + information, config, and status of a cluster of Compute + Engine instances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [project_id, region, cluster_name, cluster, update_mask] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a clusters.UpdateClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, clusters.UpdateClusterRequest): + request = clusters.UpdateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + if cluster is not None: + request.cluster = cluster + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_cluster] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + clusters.Cluster, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + def delete_cluster( + self, + request: clusters.DeleteClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Deletes a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Args: + request (:class:`~.clusters.DeleteClusterRequest`): + The request object. A request to delete a cluster. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (:class:`str`): + Required. The cluster name. 
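A hedged sketch of the resize case documented in the mask table above, reducing or growing the primary worker group via ``update_mask``. Project, region, and cluster names are placeholders, and the import assumes the same package re-export as the earlier sketch::

    from google.protobuf import field_mask_pb2

    from google.cloud.dataproc_v1beta2.services.cluster_controller import (
        ClusterControllerClient,
    )
    from google.cloud.dataproc_v1beta2.types import clusters

    client = ClusterControllerClient()

    op = client.update_cluster(
        project_id="my-project",
        region="us-central1",
        cluster_name="example-cluster",
        # Only the masked path is read from this Cluster message.
        cluster=clusters.Cluster(config={"worker_config": {"num_instances": 5}}),
        update_mask=field_mask_pb2.FieldMask(
            paths=["config.worker_config.num_instances"]
        ),
    )
    resized = op.result()  # resolves to the updated clusters.Cluster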
+ This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, cluster_name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a clusters.DeleteClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, clusters.DeleteClusterRequest): + request = clusters.DeleteClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_cluster] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty.Empty, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + def get_cluster( + self, + request: clusters.GetClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> clusters.Cluster: + r"""Gets the resource representation for a cluster in a + project. + + Args: + request (:class:`~.clusters.GetClusterRequest`): + The request object. Request to get the resource + representation for a cluster in a project. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (:class:`str`): + Required. The cluster name. 
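A short sketch of the delete path, under the same placeholder-name and import assumptions as the earlier sketches; the operation resolves to ``Empty`` as the docstring above describes::

    from google.cloud.dataproc_v1beta2.services.cluster_controller import (
        ClusterControllerClient,
    )

    client = ClusterControllerClient()
    op = client.delete_cluster(
        project_id="my-project",
        region="us-central1",
        cluster_name="example-cluster",
    )
    op.result()  # resolves to google.protobuf.Empty once the deletion completes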
+ This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.clusters.Cluster: + Describes the identifying + information, config, and status of a + cluster of Compute Engine instances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, cluster_name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a clusters.GetClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, clusters.GetClusterRequest): + request = clusters.GetClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_cluster] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_clusters( + self, + request: clusters.ListClustersRequest = None, + *, + project_id: str = None, + region: str = None, + filter: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListClustersPager: + r"""Lists all regions/{region}/clusters in a project + alphabetically. + + Args: + request (:class:`~.clusters.ListClustersRequest`): + The request object. A request to list the clusters in a + project. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + filter (:class:`str`): + Optional. A filter constraining the clusters to list. + Filters are case-sensitive and have the following + syntax: + + field = value [AND [field = value]] ... + + where **field** is one of ``status.state``, + ``clusterName``, or ``labels.[KEY]``, and ``[KEY]`` is a + label key. **value** can be ``*`` to match all values. + ``status.state`` can be one of the following: + ``ACTIVE``, ``INACTIVE``, ``CREATING``, ``RUNNING``, + ``ERROR``, ``DELETING``, or ``UPDATING``. ``ACTIVE`` + contains the ``CREATING``, ``UPDATING``, and ``RUNNING`` + states. ``INACTIVE`` contains the ``DELETING`` and + ``ERROR`` states. 
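Since ``get_cluster`` is a plain unary call, a sketch only needs the flattened fields plus, optionally, a per-call ``timeout`` or ``retry`` override; names below are placeholders and the import assumption is the same as above::

    from google.cloud.dataproc_v1beta2.services.cluster_controller import (
        ClusterControllerClient,
    )

    client = ClusterControllerClient()
    cluster = client.get_cluster(
        project_id="my-project",
        region="us-central1",
        cluster_name="example-cluster",
        timeout=30.0,  # per-call override of the default timeout
    )
    print(cluster.cluster_name, cluster.status.state)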
``clusterName`` is the name of the + cluster provided at creation time. Only the logical + ``AND`` operator is supported; space-separated items are + treated as having an implicit ``AND`` operator. + + Example filter: + + status.state = ACTIVE AND clusterName = mycluster AND + labels.env = staging AND labels.starred = \* + This corresponds to the ``filter`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListClustersPager: + The list of all clusters in a + project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, filter]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a clusters.ListClustersRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, clusters.ListClustersRequest): + request = clusters.ListClustersRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if filter is not None: + request.filter = filter + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_clusters] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListClustersPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def diagnose_cluster( + self, + request: clusters.DiagnoseClusterRequest = None, + *, + project_id: str = None, + region: str = None, + cluster_name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Gets cluster diagnostic information. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + After the operation completes, + [Operation.response][google.longrunning.Operation.response] + contains [Empty][google.protobuf.Empty]. + + Args: + request (:class:`~.clusters.DiagnoseClusterRequest`): + The request object. A request to collect cluster + diagnostic information. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the cluster + belongs to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. 
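A sketch of listing with the filter grammar documented above; the returned pager is directly iterable and fetches additional pages transparently. Placeholder names and the earlier import assumption apply::

    from google.cloud.dataproc_v1beta2.services.cluster_controller import (
        ClusterControllerClient,
    )

    client = ClusterControllerClient()
    for cluster in client.list_clusters(
        project_id="my-project",
        region="us-central1",
        filter="status.state = ACTIVE AND labels.env = staging",
    ):
        print(cluster.cluster_name)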
+ This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_name (:class:`str`): + Required. The cluster name. + This corresponds to the ``cluster_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, cluster_name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a clusters.DiagnoseClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, clusters.DiagnoseClusterRequest): + request = clusters.DiagnoseClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if cluster_name is not None: + request.cluster_name = cluster_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.diagnose_cluster] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty.Empty, + metadata_type=operations.ClusterOperationMetadata, + ) + + # Done; return the response. + return response + + +try: + _client_info = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-dataproc",).version, + ) +except pkg_resources.DistributionNotFound: + _client_info = gapic_v1.client_info.ClientInfo() + + +__all__ = ("ClusterControllerClient",) diff --git a/google/cloud/dataproc_v1beta2/services/cluster_controller/pagers.py b/google/cloud/dataproc_v1beta2/services/cluster_controller/pagers.py new file mode 100644 index 00000000..d7c6c416 --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/cluster_controller/pagers.py @@ -0,0 +1,148 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
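To round out the client surface just shown, a sketch of ``diagnose_cluster`` under the same assumptions; per the docstring, the operation's response is ``Empty``, so the call is mainly useful for its side effect::

    from google.cloud.dataproc_v1beta2.services.cluster_controller import (
        ClusterControllerClient,
    )

    client = ClusterControllerClient()
    op = client.diagnose_cluster(
        project_id="my-project",
        region="us-central1",
        cluster_name="example-cluster",
    )
    op.result()  # completes with google.protobuf.Empty, per the docstring above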
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple + +from google.cloud.dataproc_v1beta2.types import clusters + + +class ListClustersPager: + """A pager for iterating through ``list_clusters`` requests. + + This class thinly wraps an initial + :class:`~.clusters.ListClustersResponse` object, and + provides an ``__iter__`` method to iterate through its + ``clusters`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListClusters`` requests and continue to iterate + through the ``clusters`` field on the + corresponding responses. + + All the usual :class:`~.clusters.ListClustersResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., clusters.ListClustersResponse], + request: clusters.ListClustersRequest, + response: clusters.ListClustersResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.clusters.ListClustersRequest`): + The initial request object. + response (:class:`~.clusters.ListClustersResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = clusters.ListClustersRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[clusters.ListClustersResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[clusters.Cluster]: + for page in self.pages: + yield from page.clusters + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListClustersAsyncPager: + """A pager for iterating through ``list_clusters`` requests. + + This class thinly wraps an initial + :class:`~.clusters.ListClustersResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``clusters`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListClusters`` requests and continue to iterate + through the ``clusters`` field on the + corresponding responses. + + All the usual :class:`~.clusters.ListClustersResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[clusters.ListClustersResponse]], + request: clusters.ListClustersRequest, + response: clusters.ListClustersResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. 
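As a usage note on the synchronous pager defined above: iterating the pager yields ``Cluster`` items and issues follow-up ``ListClusters`` requests lazily, while the ``pages`` property exposes whole responses. A sketch, with placeholder names and the same import assumption as before::

    from google.cloud.dataproc_v1beta2.services.cluster_controller import (
        ClusterControllerClient,
    )

    client = ClusterControllerClient()
    pager = client.list_clusters(project_id="my-project", region="us-central1")

    # Flat iteration: follow-up pages are requested only as they are reached.
    for cluster in pager:
        print(cluster.cluster_name)

    # Page-level access (e.g. to inspect next_page_token) is available via
    # `pager.pages` instead of iterating the items directly.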
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.clusters.ListClustersRequest`): + The initial request object. + response (:class:`~.clusters.ListClustersResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = clusters.ListClustersRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[clusters.ListClustersResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[clusters.Cluster]: + async def async_generator(): + async for page in self.pages: + for response in page.clusters: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/__init__.py b/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/__init__.py new file mode 100644 index 00000000..9aa597b6 --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import ClusterControllerTransport +from .grpc import ClusterControllerGrpcTransport +from .grpc_asyncio import ClusterControllerGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[ClusterControllerTransport]] +_transport_registry["grpc"] = ClusterControllerGrpcTransport +_transport_registry["grpc_asyncio"] = ClusterControllerGrpcAsyncIOTransport + + +__all__ = ( + "ClusterControllerTransport", + "ClusterControllerGrpcTransport", + "ClusterControllerGrpcAsyncIOTransport", +) diff --git a/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/base.py b/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/base.py new file mode 100644 index 00000000..864494b6 --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/base.py @@ -0,0 +1,242 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
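The async pager above supports ``async for`` in the same way. The sketch below assumes the async client defined elsewhere in this patch is re-exported as ``ClusterControllerAsyncClient`` alongside the sync client, and uses placeholder names::

    import asyncio

    from google.cloud.dataproc_v1beta2.services.cluster_controller import (
        ClusterControllerAsyncClient,
    )

    async def main():
        client = ClusterControllerAsyncClient()
        pager = await client.list_clusters(
            project_id="my-project", region="us-central1",
        )
        async for cluster in pager:  # pages are awaited lazily, one at a time
            print(cluster.cluster_name)

    asyncio.run(main())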
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import abc +import typing +import pkg_resources + +from google import auth +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.dataproc_v1beta2.types import clusters +from google.longrunning import operations_pb2 as operations # type: ignore + + +try: + _client_info = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-dataproc",).version, + ) +except pkg_resources.DistributionNotFound: + _client_info = gapic_v1.client_info.ClientInfo() + + +class ClusterControllerTransport(abc.ABC): + """Abstract transport class for ClusterController.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + def __init__( + self, + *, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scope (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=scopes, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = auth.default( + scopes=scopes, quota_project_id=quota_project_id + ) + + # Save the credentials. + self._credentials = credentials + + # Lifted into its own function so it can be stubbed out during tests. + self._prep_wrapped_messages() + + def _prep_wrapped_messages(self): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.create_cluster: gapic_v1.method.wrap_method( + self.create_cluster, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=300.0, + client_info=_client_info, + ), + self.update_cluster: gapic_v1.method.wrap_method( + self.update_cluster, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=300.0, + client_info=_client_info, + ), + self.delete_cluster: gapic_v1.method.wrap_method( + self.delete_cluster, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=300.0, + client_info=_client_info, + ), + self.get_cluster: gapic_v1.method.wrap_method( + self.get_cluster, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded, + ), + ), + default_timeout=300.0, + client_info=_client_info, + ), + self.list_clusters: gapic_v1.method.wrap_method( + self.list_clusters, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded, + ), + ), + default_timeout=300.0, + client_info=_client_info, + ), + self.diagnose_cluster: gapic_v1.method.wrap_method( + self.diagnose_cluster, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=300.0, + client_info=_client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_cluster( + self, + ) -> typing.Callable[ + [clusters.CreateClusterRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def update_cluster( + self, + ) -> typing.Callable[ + [clusters.UpdateClusterRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def delete_cluster( + self, + ) -> typing.Callable[ + [clusters.DeleteClusterRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def get_cluster( + self, + ) -> typing.Callable[ + [clusters.GetClusterRequest], + typing.Union[clusters.Cluster, typing.Awaitable[clusters.Cluster]], + ]: + raise NotImplementedError() + + @property + def list_clusters( + self, + ) -> typing.Callable[ + [clusters.ListClustersRequest], + typing.Union[ + clusters.ListClustersResponse, + typing.Awaitable[clusters.ListClustersResponse], + ], + ]: + raise NotImplementedError() + + @property + def diagnose_cluster( + self, + ) -> typing.Callable[ + [clusters.DiagnoseClusterRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + +__all__ = ("ClusterControllerTransport",) diff --git a/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc.py 
b/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc.py new file mode 100644 index 00000000..abb5622f --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc.py @@ -0,0 +1,397 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + + +import grpc # type: ignore + +from google.cloud.dataproc_v1beta2.types import clusters +from google.longrunning import operations_pb2 as operations # type: ignore + +from .base import ClusterControllerTransport + + +class ClusterControllerGrpcTransport(ClusterControllerTransport): + """gRPC backend transport for ClusterController. + + The ClusterControllerService provides methods to manage + clusters of Compute Engine instances. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If + provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. 
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A + callback to provide client SSL certificate bytes and private key + bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` + is None. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + self._stubs = {} # type: Dict[str, Callable] + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + @classmethod + def create_channel( + cls, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + address (Optionsl[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
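A sketch of wiring a pre-built channel through this gRPC transport and into the client. Per the constructor logic above, supplying a channel causes credentials to be ignored, and (per the client constructor shown earlier) a transport instance must not be combined with credentials in ``client_options``. The endpoint string and import layout follow the paths added in this patch; application default credentials are assumed when ``create_channel`` is called without explicit ones::

    from google.cloud.dataproc_v1beta2.services.cluster_controller import (
        ClusterControllerClient,
    )
    from google.cloud.dataproc_v1beta2.services.cluster_controller.transports import (
        ClusterControllerGrpcTransport,
    )

    # Build the channel explicitly, then hand the ready-made transport to the client.
    channel = ClusterControllerGrpcTransport.create_channel(
        "dataproc.googleapis.com:443",
    )
    transport = ClusterControllerGrpcTransport(channel=channel)
    client = ClusterControllerClient(transport=transport)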
+ """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Sanity check: Only create a new channel if we do not already + # have one. + if not hasattr(self, "_grpc_channel"): + self._grpc_channel = self.create_channel( + self._host, credentials=self._credentials, + ) + + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if "operations_client" not in self.__dict__: + self.__dict__["operations_client"] = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self.__dict__["operations_client"] + + @property + def create_cluster( + self, + ) -> Callable[[clusters.CreateClusterRequest], operations.Operation]: + r"""Return a callable for the create cluster method over gRPC. + + Creates a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Returns: + Callable[[~.CreateClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_cluster" not in self._stubs: + self._stubs["create_cluster"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.ClusterController/CreateCluster", + request_serializer=clusters.CreateClusterRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_cluster"] + + @property + def update_cluster( + self, + ) -> Callable[[clusters.UpdateClusterRequest], operations.Operation]: + r"""Return a callable for the update cluster method over gRPC. + + Updates a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Returns: + Callable[[~.UpdateClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_cluster" not in self._stubs: + self._stubs["update_cluster"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.ClusterController/UpdateCluster", + request_serializer=clusters.UpdateClusterRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["update_cluster"] + + @property + def delete_cluster( + self, + ) -> Callable[[clusters.DeleteClusterRequest], operations.Operation]: + r"""Return a callable for the delete cluster method over gRPC. + + Deletes a cluster in a project. 
The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Returns: + Callable[[~.DeleteClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_cluster" not in self._stubs: + self._stubs["delete_cluster"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.ClusterController/DeleteCluster", + request_serializer=clusters.DeleteClusterRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_cluster"] + + @property + def get_cluster(self) -> Callable[[clusters.GetClusterRequest], clusters.Cluster]: + r"""Return a callable for the get cluster method over gRPC. + + Gets the resource representation for a cluster in a + project. + + Returns: + Callable[[~.GetClusterRequest], + ~.Cluster]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_cluster" not in self._stubs: + self._stubs["get_cluster"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.ClusterController/GetCluster", + request_serializer=clusters.GetClusterRequest.serialize, + response_deserializer=clusters.Cluster.deserialize, + ) + return self._stubs["get_cluster"] + + @property + def list_clusters( + self, + ) -> Callable[[clusters.ListClustersRequest], clusters.ListClustersResponse]: + r"""Return a callable for the list clusters method over gRPC. + + Lists all regions/{region}/clusters in a project + alphabetically. + + Returns: + Callable[[~.ListClustersRequest], + ~.ListClustersResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_clusters" not in self._stubs: + self._stubs["list_clusters"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.ClusterController/ListClusters", + request_serializer=clusters.ListClustersRequest.serialize, + response_deserializer=clusters.ListClustersResponse.deserialize, + ) + return self._stubs["list_clusters"] + + @property + def diagnose_cluster( + self, + ) -> Callable[[clusters.DiagnoseClusterRequest], operations.Operation]: + r"""Return a callable for the diagnose cluster method over gRPC. + + Gets cluster diagnostic information. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + After the operation completes, + [Operation.response][google.longrunning.Operation.response] + contains [Empty][google.protobuf.Empty]. + + Returns: + Callable[[~.DiagnoseClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "diagnose_cluster" not in self._stubs: + self._stubs["diagnose_cluster"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.ClusterController/DiagnoseCluster", + request_serializer=clusters.DiagnoseClusterRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["diagnose_cluster"] + + +__all__ = ("ClusterControllerGrpcTransport",) diff --git a/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc_asyncio.py b/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc_asyncio.py new file mode 100644 index 00000000..4d778267 --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/cluster_controller/transports/grpc_asyncio.py @@ -0,0 +1,394 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.dataproc_v1beta2.types import clusters +from google.longrunning import operations_pb2 as operations # type: ignore + +from .base import ClusterControllerTransport +from .grpc import ClusterControllerGrpcTransport + + +class ClusterControllerGrpcAsyncIOTransport(ClusterControllerTransport): + """gRPC AsyncIO backend transport for ClusterController. + + The ClusterControllerService provides methods to manage + clusters of Compute Engine instances. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + address (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. 
These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If + provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A + callback to provide client SSL certificate bytes and private key + bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` + is None. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. 
+ self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + self._stubs = {} + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Sanity check: Only create a new channel if we do not already + # have one. + if not hasattr(self, "_grpc_channel"): + self._grpc_channel = self.create_channel( + self._host, credentials=self._credentials, + ) + + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if "operations_client" not in self.__dict__: + self.__dict__["operations_client"] = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self.__dict__["operations_client"] + + @property + def create_cluster( + self, + ) -> Callable[[clusters.CreateClusterRequest], Awaitable[operations.Operation]]: + r"""Return a callable for the create cluster method over gRPC. + + Creates a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Returns: + Callable[[~.CreateClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_cluster" not in self._stubs: + self._stubs["create_cluster"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.ClusterController/CreateCluster", + request_serializer=clusters.CreateClusterRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["create_cluster"] + + @property + def update_cluster( + self, + ) -> Callable[[clusters.UpdateClusterRequest], Awaitable[operations.Operation]]: + r"""Return a callable for the update cluster method over gRPC. + + Updates a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Returns: + Callable[[~.UpdateClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "update_cluster" not in self._stubs: + self._stubs["update_cluster"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.ClusterController/UpdateCluster", + request_serializer=clusters.UpdateClusterRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["update_cluster"] + + @property + def delete_cluster( + self, + ) -> Callable[[clusters.DeleteClusterRequest], Awaitable[operations.Operation]]: + r"""Return a callable for the delete cluster method over gRPC. + + Deletes a cluster in a project. The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + + Returns: + Callable[[~.DeleteClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_cluster" not in self._stubs: + self._stubs["delete_cluster"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.ClusterController/DeleteCluster", + request_serializer=clusters.DeleteClusterRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["delete_cluster"] + + @property + def get_cluster( + self, + ) -> Callable[[clusters.GetClusterRequest], Awaitable[clusters.Cluster]]: + r"""Return a callable for the get cluster method over gRPC. + + Gets the resource representation for a cluster in a + project. + + Returns: + Callable[[~.GetClusterRequest], + Awaitable[~.Cluster]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_cluster" not in self._stubs: + self._stubs["get_cluster"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.ClusterController/GetCluster", + request_serializer=clusters.GetClusterRequest.serialize, + response_deserializer=clusters.Cluster.deserialize, + ) + return self._stubs["get_cluster"] + + @property + def list_clusters( + self, + ) -> Callable[ + [clusters.ListClustersRequest], Awaitable[clusters.ListClustersResponse] + ]: + r"""Return a callable for the list clusters method over gRPC. + + Lists all regions/{region}/clusters in a project + alphabetically. + + Returns: + Callable[[~.ListClustersRequest], + Awaitable[~.ListClustersResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_clusters" not in self._stubs: + self._stubs["list_clusters"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.ClusterController/ListClusters", + request_serializer=clusters.ListClustersRequest.serialize, + response_deserializer=clusters.ListClustersResponse.deserialize, + ) + return self._stubs["list_clusters"] + + @property + def diagnose_cluster( + self, + ) -> Callable[[clusters.DiagnoseClusterRequest], Awaitable[operations.Operation]]: + r"""Return a callable for the diagnose cluster method over gRPC. + + Gets cluster diagnostic information. 
The returned + [Operation.metadata][google.longrunning.Operation.metadata] will + be + `ClusterOperationMetadata `__. + After the operation completes, + [Operation.response][google.longrunning.Operation.response] + contains [Empty][google.protobuf.Empty]. + + Returns: + Callable[[~.DiagnoseClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "diagnose_cluster" not in self._stubs: + self._stubs["diagnose_cluster"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.ClusterController/DiagnoseCluster", + request_serializer=clusters.DiagnoseClusterRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["diagnose_cluster"] + + +__all__ = ("ClusterControllerGrpcAsyncIOTransport",) diff --git a/google/cloud/__init__.py b/google/cloud/dataproc_v1beta2/services/job_controller/__init__.py similarity index 71% rename from google/cloud/__init__.py rename to google/cloud/dataproc_v1beta2/services/job_controller/__init__.py index 9a1b64a6..5bb83207 100644 --- a/google/cloud/__init__.py +++ b/google/cloud/dataproc_v1beta2/services/job_controller/__init__.py @@ -1,24 +1,24 @@ # -*- coding: utf-8 -*- -# + # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# https://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# -try: - import pkg_resources - - pkg_resources.declare_namespace(__name__) -except ImportError: - import pkgutil +from .client import JobControllerClient +from .async_client import JobControllerAsyncClient - __path__ = pkgutil.extend_path(__path__, __name__) +__all__ = ( + "JobControllerClient", + "JobControllerAsyncClient", +) diff --git a/google/cloud/dataproc_v1beta2/services/job_controller/async_client.py b/google/cloud/dataproc_v1beta2/services/job_controller/async_client.py new file mode 100644 index 00000000..b83e2612 --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/job_controller/async_client.py @@ -0,0 +1,722 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
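+#
+# A minimal usage sketch for the async client defined below, assuming
+# application default credentials; "my-project", "us-central1" and "job-1234"
+# are placeholder values:
+#
+#     client = JobControllerAsyncClient()
+#     job = await client.get_job(
+#         project_id="my-project", region="us-central1", job_id="job-1234",
+#     )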
+# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation +from google.api_core import operation_async +from google.cloud.dataproc_v1beta2.services.job_controller import pagers +from google.cloud.dataproc_v1beta2.types import jobs + +from .transports.base import JobControllerTransport +from .transports.grpc_asyncio import JobControllerGrpcAsyncIOTransport +from .client import JobControllerClient + + +class JobControllerAsyncClient: + """The JobController provides methods to manage jobs.""" + + _client: JobControllerClient + + DEFAULT_ENDPOINT = JobControllerClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = JobControllerClient.DEFAULT_MTLS_ENDPOINT + + from_service_account_file = JobControllerClient.from_service_account_file + from_service_account_json = from_service_account_file + + get_transport_class = functools.partial( + type(JobControllerClient).get_transport_class, type(JobControllerClient) + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, JobControllerTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + ) -> None: + """Instantiate the job controller client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.JobControllerTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint, this is the default value for + the environment variable) and "auto" (auto switch to the default + mTLS endpoint if client SSL credentials is present). However, + the ``api_endpoint`` property takes precedence if provided. + (2) The ``client_cert_source`` property is used to provide client + SSL credentials for mutual TLS transport. If not provided, the + default SSL credentials will be used if present. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + + self._client = JobControllerClient( + credentials=credentials, transport=transport, client_options=client_options, + ) + + async def submit_job( + self, + request: jobs.SubmitJobRequest = None, + *, + project_id: str = None, + region: str = None, + job: jobs.Job = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Submits a job to a cluster. + + Args: + request (:class:`~.jobs.SubmitJobRequest`): + The request object. 
A request to submit a job. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job (:class:`~.jobs.Job`): + Required. The job resource. + This corresponds to the ``job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.jobs.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, region, job]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = jobs.SubmitJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job is not None: + request.job = job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.submit_job, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=900.0, + client_info=_client_info, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def submit_job_as_operation( + self, + request: jobs.SubmitJobRequest = None, + *, + project_id: str = None, + region: str = None, + job: jobs.Job = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Submits job to a cluster. + + Args: + request (:class:`~.jobs.SubmitJobRequest`): + The request object. A request to submit a job. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job (:class:`~.jobs.Job`): + Required. The job resource. + This corresponds to the ``job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.jobs.Job``: A Dataproc job resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, region, job]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = jobs.SubmitJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job is not None: + request.job = job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.submit_job_as_operation, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=900.0, + client_info=_client_info, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + jobs.Job, + metadata_type=jobs.JobMetadata, + ) + + # Done; return the response. + return response + + async def get_job( + self, + request: jobs.GetJobRequest = None, + *, + project_id: str = None, + region: str = None, + job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Gets the resource representation for a job in a + project. + + Args: + request (:class:`~.jobs.GetJobRequest`): + The request object. A request to get the resource + representation for a job in a project. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job_id (:class:`str`): + Required. The job ID. + This corresponds to the ``job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.jobs.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, region, job_id]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = jobs.GetJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job_id is not None: + request.job_id = job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_job, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded, + ), + ), + default_timeout=900.0, + client_info=_client_info, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_jobs( + self, + request: jobs.ListJobsRequest = None, + *, + project_id: str = None, + region: str = None, + filter: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListJobsAsyncPager: + r"""Lists regions/{region}/jobs in a project. + + Args: + request (:class:`~.jobs.ListJobsRequest`): + The request object. A request to list jobs in a project. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + filter (:class:`str`): + Optional. A filter constraining the jobs to list. + Filters are case-sensitive and have the following + syntax: + + [field = value] AND [field [= value]] ... + + where **field** is ``status.state`` or ``labels.[KEY]``, + and ``[KEY]`` is a label key. **value** can be ``*`` to + match all values. ``status.state`` can be either + ``ACTIVE`` or ``NON_ACTIVE``. Only the logical ``AND`` + operator is supported; space-separated items are treated + as having an implicit ``AND`` operator. + + Example filter: + + status.state = ACTIVE AND labels.env = staging AND + labels.starred = \* + This corresponds to the ``filter`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListJobsAsyncPager: + A list of jobs in a project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, region, filter]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = jobs.ListJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
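+        # Usage sketch for the async pager returned by this method
+        # ("my-project" and "us-central1" are placeholder values):
+        #     pager = await client.list_jobs(project_id="my-project", region="us-central1")
+        #     async for job in pager:
+        #         print(job.reference.job_id)
+        # Additional pages are requested lazily as the iteration proceeds.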
+ + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if filter is not None: + request.filter = filter + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_jobs, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded, + ), + ), + default_timeout=900.0, + client_info=_client_info, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListJobsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_job( + self, + request: jobs.UpdateJobRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Updates a job in a project. + + Args: + request (:class:`~.jobs.UpdateJobRequest`): + The request object. A request to update a job. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.jobs.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + + request = jobs.UpdateJobRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_job, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=900.0, + client_info=_client_info, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def cancel_job( + self, + request: jobs.CancelJobRequest = None, + *, + project_id: str = None, + region: str = None, + job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Starts a job cancellation request. To access the job resource + after cancellation, call + `regions/{region}/jobs.list `__ + or + `regions/{region}/jobs.get `__. + + Args: + request (:class:`~.jobs.CancelJobRequest`): + The request object. A request to cancel a job. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job_id (:class:`str`): + Required. The job ID. + This corresponds to the ``job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.jobs.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, region, job_id]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = jobs.CancelJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job_id is not None: + request.job_id = job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_job, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded, + ), + ), + default_timeout=900.0, + client_info=_client_info, + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def delete_job( + self, + request: jobs.DeleteJobRequest = None, + *, + project_id: str = None, + region: str = None, + job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes the job from the project. If the job is active, the + delete fails, and the response returns ``FAILED_PRECONDITION``. + + Args: + request (:class:`~.jobs.DeleteJobRequest`): + The request object. A request to delete a job. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job_id (:class:`str`): + Required. The job ID. + This corresponds to the ``job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([project_id, region, job_id]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = jobs.DeleteJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job_id is not None: + request.job_id = job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_job, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=900.0, + client_info=_client_info, + ) + + # Send the request. + await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + +try: + _client_info = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-dataproc",).version, + ) +except pkg_resources.DistributionNotFound: + _client_info = gapic_v1.client_info.ClientInfo() + + +__all__ = ("JobControllerAsyncClient",) diff --git a/google/cloud/dataproc_v1beta2/services/job_controller/client.py b/google/cloud/dataproc_v1beta2/services/job_controller/client.py new file mode 100644 index 00000000..e34798cd --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/job_controller/client.py @@ -0,0 +1,805 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +import os +import re +from typing import Callable, Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation +from google.api_core import operation_async +from google.cloud.dataproc_v1beta2.services.job_controller import pagers +from google.cloud.dataproc_v1beta2.types import jobs + +from .transports.base import JobControllerTransport +from .transports.grpc import JobControllerGrpcTransport +from .transports.grpc_asyncio import JobControllerGrpcAsyncIOTransport + + +class JobControllerClientMeta(type): + """Metaclass for the JobController client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = OrderedDict() # type: Dict[str, Type[JobControllerTransport]] + _transport_registry["grpc"] = JobControllerGrpcTransport + _transport_registry["grpc_asyncio"] = JobControllerGrpcAsyncIOTransport + + def get_transport_class(cls, label: str = None,) -> Type[JobControllerTransport]: + """Return an appropriate transport class. + + Args: + label: The name of the desired transport. 
If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class JobControllerClient(metaclass=JobControllerClientMeta):
+    """The JobController provides methods to manage jobs."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Convert api endpoint to mTLS endpoint.
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "dataproc.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            JobControllerClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    def __init__(
+        self,
+        *,
+        credentials: credentials.Credentials = None,
+        transport: Union[str, JobControllerTransport] = None,
+        client_options: ClientOptions = None,
+    ) -> None:
+        """Instantiate the job controller client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, ~.JobControllerTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (ClientOptions): Custom options for the client. It
+                won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint, this is the default value for
+                the environment variable) and "auto" (auto switch to the default
+                mTLS endpoint if client SSL credentials is present). However,
+                the ``api_endpoint`` property takes precedence if provided.
+                (2) The ``client_cert_source`` property is used to provide client
+                SSL credentials for mutual TLS transport.
If not provided, the + default SSL credentials will be used if present. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = ClientOptions.from_dict(client_options) + if client_options is None: + client_options = ClientOptions.ClientOptions() + + if client_options.api_endpoint is None: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") + if use_mtls_env == "never": + client_options.api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + has_client_cert_source = ( + client_options.client_cert_source is not None + or mtls.has_default_client_cert_source() + ) + client_options.api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT + if has_client_cert_source + else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, JobControllerTransport): + # transport is a JobControllerTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=client_options.api_endpoint, + scopes=client_options.scopes, + api_mtls_endpoint=client_options.api_endpoint, + client_cert_source=client_options.client_cert_source, + quota_project_id=client_options.quota_project_id, + ) + + def submit_job( + self, + request: jobs.SubmitJobRequest = None, + *, + project_id: str = None, + region: str = None, + job: jobs.Job = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Submits a job to a cluster. + + Args: + request (:class:`~.jobs.SubmitJobRequest`): + The request object. A request to submit a job. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job (:class:`~.jobs.Job`): + Required. The job resource. + This corresponds to the ``job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.jobs.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. 
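+        # The flattened and request-object calling conventions are mutually
+        # exclusive; for example (placeholder values), either
+        #     client.submit_job(project_id="my-project", region="us-central1", job=job)
+        # or
+        #     client.submit_job(
+        #         request=jobs.SubmitJobRequest(
+        #             project_id="my-project", region="us-central1", job=job,
+        #         )
+        #     )
+        # may be used, but not a mix of the two.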
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, job]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a jobs.SubmitJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, jobs.SubmitJobRequest): + request = jobs.SubmitJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job is not None: + request.job = job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.submit_job] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def submit_job_as_operation( + self, + request: jobs.SubmitJobRequest = None, + *, + project_id: str = None, + region: str = None, + job: jobs.Job = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Submits job to a cluster. + + Args: + request (:class:`~.jobs.SubmitJobRequest`): + The request object. A request to submit a job. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job (:class:`~.jobs.Job`): + Required. The job resource. + This corresponds to the ``job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.jobs.Job``: A Dataproc job resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, job]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a jobs.SubmitJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
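+        # A blocking usage sketch (placeholder values): the returned
+        # google.api_core.operation.Operation can be waited on, e.g.
+        #     op = client.submit_job_as_operation(
+        #         project_id="my-project", region="us-central1", job=job,
+        #     )
+        #     finished_job = op.result()
+        # which polls until the long-running operation completes.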
+ if not isinstance(request, jobs.SubmitJobRequest): + request = jobs.SubmitJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job is not None: + request.job = job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.submit_job_as_operation] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + jobs.Job, + metadata_type=jobs.JobMetadata, + ) + + # Done; return the response. + return response + + def get_job( + self, + request: jobs.GetJobRequest = None, + *, + project_id: str = None, + region: str = None, + job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Gets the resource representation for a job in a + project. + + Args: + request (:class:`~.jobs.GetJobRequest`): + The request object. A request to get the resource + representation for a job in a project. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job_id (:class:`str`): + Required. The job ID. + This corresponds to the ``job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.jobs.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, job_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a jobs.GetJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, jobs.GetJobRequest): + request = jobs.GetJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job_id is not None: + request.job_id = job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_job] + + # Send the request. 
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_jobs( + self, + request: jobs.ListJobsRequest = None, + *, + project_id: str = None, + region: str = None, + filter: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListJobsPager: + r"""Lists regions/{region}/jobs in a project. + + Args: + request (:class:`~.jobs.ListJobsRequest`): + The request object. A request to list jobs in a project. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + filter (:class:`str`): + Optional. A filter constraining the jobs to list. + Filters are case-sensitive and have the following + syntax: + + [field = value] AND [field [= value]] ... + + where **field** is ``status.state`` or ``labels.[KEY]``, + and ``[KEY]`` is a label key. **value** can be ``*`` to + match all values. ``status.state`` can be either + ``ACTIVE`` or ``NON_ACTIVE``. Only the logical ``AND`` + operator is supported; space-separated items are treated + as having an implicit ``AND`` operator. + + Example filter: + + status.state = ACTIVE AND labels.env = staging AND + labels.starred = \* + This corresponds to the ``filter`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListJobsPager: + A list of jobs in a project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, filter]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a jobs.ListJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, jobs.ListJobsRequest): + request = jobs.ListJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if filter is not None: + request.filter = filter + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_jobs] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
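+        # Usage sketch for the pager constructed below ("my-project" and
+        # "us-central1" are placeholder values):
+        #     for job in client.list_jobs(project_id="my-project", region="us-central1"):
+        #         print(job.reference.job_id)
+        # Pages beyond the first are fetched lazily during iteration.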
+ response = pagers.ListJobsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + def update_job( + self, + request: jobs.UpdateJobRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Updates a job in a project. + + Args: + request (:class:`~.jobs.UpdateJobRequest`): + The request object. A request to update a job. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.jobs.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + + # Minor optimization to avoid making a copy if the user passes + # in a jobs.UpdateJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, jobs.UpdateJobRequest): + request = jobs.UpdateJobRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_job] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def cancel_job( + self, + request: jobs.CancelJobRequest = None, + *, + project_id: str = None, + region: str = None, + job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> jobs.Job: + r"""Starts a job cancellation request. To access the job resource + after cancellation, call + `regions/{region}/jobs.list `__ + or + `regions/{region}/jobs.get `__. + + Args: + request (:class:`~.jobs.CancelJobRequest`): + The request object. A request to cancel a job. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job_id (:class:`str`): + Required. The job ID. + This corresponds to the ``job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.jobs.Job: + A Dataproc job resource. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, job_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a jobs.CancelJobRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, jobs.CancelJobRequest): + request = jobs.CancelJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job_id is not None: + request.job_id = job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_job] + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def delete_job( + self, + request: jobs.DeleteJobRequest = None, + *, + project_id: str = None, + region: str = None, + job_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes the job from the project. If the job is active, the + delete fails, and the response returns ``FAILED_PRECONDITION``. + + Args: + request (:class:`~.jobs.DeleteJobRequest`): + The request object. A request to delete a job. + project_id (:class:`str`): + Required. The ID of the Google Cloud + Platform project that the job belongs + to. + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (:class:`str`): + Required. The Dataproc region in + which to handle the request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + job_id (:class:`str`): + Required. The job ID. + This corresponds to the ``job_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, region, job_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a jobs.DeleteJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, jobs.DeleteJobRequest): + request = jobs.DeleteJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if region is not None: + request.region = region + if job_id is not None: + request.job_id = job_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_job] + + # Send the request. 
+ rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + +try: + _client_info = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-dataproc",).version, + ) +except pkg_resources.DistributionNotFound: + _client_info = gapic_v1.client_info.ClientInfo() + + +__all__ = ("JobControllerClient",) diff --git a/google/cloud/dataproc_v1beta2/services/job_controller/pagers.py b/google/cloud/dataproc_v1beta2/services/job_controller/pagers.py new file mode 100644 index 00000000..98cd30cb --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/job_controller/pagers.py @@ -0,0 +1,148 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple + +from google.cloud.dataproc_v1beta2.types import jobs + + +class ListJobsPager: + """A pager for iterating through ``list_jobs`` requests. + + This class thinly wraps an initial + :class:`~.jobs.ListJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``jobs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListJobs`` requests and continue to iterate + through the ``jobs`` field on the + corresponding responses. + + All the usual :class:`~.jobs.ListJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., jobs.ListJobsResponse], + request: jobs.ListJobsRequest, + response: jobs.ListJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.jobs.ListJobsRequest`): + The initial request object. + response (:class:`~.jobs.ListJobsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = jobs.ListJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[jobs.ListJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[jobs.Job]: + for page in self.pages: + yield from page.jobs + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListJobsAsyncPager: + """A pager for iterating through ``list_jobs`` requests. 
+ + This class thinly wraps an initial + :class:`~.jobs.ListJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``jobs`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListJobs`` requests and continue to iterate + through the ``jobs`` field on the + corresponding responses. + + All the usual :class:`~.jobs.ListJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[jobs.ListJobsResponse]], + request: jobs.ListJobsRequest, + response: jobs.ListJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.jobs.ListJobsRequest`): + The initial request object. + response (:class:`~.jobs.ListJobsResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = jobs.ListJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[jobs.ListJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[jobs.Job]: + async def async_generator(): + async for page in self.pages: + for response in page.jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/dataproc_v1beta2/services/job_controller/transports/__init__.py b/google/cloud/dataproc_v1beta2/services/job_controller/transports/__init__.py new file mode 100644 index 00000000..a3d68663 --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/job_controller/transports/__init__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import JobControllerTransport +from .grpc import JobControllerGrpcTransport +from .grpc_asyncio import JobControllerGrpcAsyncIOTransport + + +# Compile a registry of transports. 
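+# The labels used here ("grpc", "grpc_asyncio") mirror the values accepted by
+# the ``transport`` argument of JobControllerClient and
+# JobControllerAsyncClient.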
+_transport_registry = OrderedDict() # type: Dict[str, Type[JobControllerTransport]] +_transport_registry["grpc"] = JobControllerGrpcTransport +_transport_registry["grpc_asyncio"] = JobControllerGrpcAsyncIOTransport + + +__all__ = ( + "JobControllerTransport", + "JobControllerGrpcTransport", + "JobControllerGrpcAsyncIOTransport", +) diff --git a/google/cloud/dataproc_v1beta2/services/job_controller/transports/base.py b/google/cloud/dataproc_v1beta2/services/job_controller/transports/base.py new file mode 100644 index 00000000..99a86d34 --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/job_controller/transports/base.py @@ -0,0 +1,260 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import abc +import typing +import pkg_resources + +from google import auth +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.dataproc_v1beta2.types import jobs +from google.longrunning import operations_pb2 as operations # type: ignore +from google.protobuf import empty_pb2 as empty # type: ignore + + +try: + _client_info = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-dataproc",).version, + ) +except pkg_resources.DistributionNotFound: + _client_info = gapic_v1.client_info.ClientInfo() + + +class JobControllerTransport(abc.ABC): + """Abstract transport class for JobController.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + def __init__( + self, + *, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scope (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + # If no credentials are provided, then determine the appropriate + # defaults. 
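To illustrate the registry at the top of this transports package: the generated clients resolve a transport label to one of these classes, typically through a get_transport_class helper on the client. A sketch under that assumption:

from google.cloud.dataproc_v1beta2.services.job_controller import JobControllerClient
from google.cloud.dataproc_v1beta2.services.job_controller.transports import (
    JobControllerGrpcAsyncIOTransport,
    JobControllerGrpcTransport,
)

# "grpc" is the default label for the synchronous client, "grpc_asyncio"
# for the async client; both names come from the OrderedDict registry.
assert JobControllerClient.get_transport_class("grpc") is JobControllerGrpcTransport
assert (
    JobControllerClient.get_transport_class("grpc_asyncio")
    is JobControllerGrpcAsyncIOTransport
)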
+ if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=scopes, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = auth.default( + scopes=scopes, quota_project_id=quota_project_id + ) + + # Save the credentials. + self._credentials = credentials + + # Lifted into its own function so it can be stubbed out during tests. + self._prep_wrapped_messages() + + def _prep_wrapped_messages(self): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.submit_job: gapic_v1.method.wrap_method( + self.submit_job, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=900.0, + client_info=_client_info, + ), + self.submit_job_as_operation: gapic_v1.method.wrap_method( + self.submit_job_as_operation, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=900.0, + client_info=_client_info, + ), + self.get_job: gapic_v1.method.wrap_method( + self.get_job, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded, + ), + ), + default_timeout=900.0, + client_info=_client_info, + ), + self.list_jobs: gapic_v1.method.wrap_method( + self.list_jobs, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded, + ), + ), + default_timeout=900.0, + client_info=_client_info, + ), + self.update_job: gapic_v1.method.wrap_method( + self.update_job, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=900.0, + client_info=_client_info, + ), + self.cancel_job: gapic_v1.method.wrap_method( + self.cancel_job, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded, + ), + ), + default_timeout=900.0, + client_info=_client_info, + ), + self.delete_job: gapic_v1.method.wrap_method( + self.delete_job, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=900.0, + client_info=_client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def submit_job( + self, + ) -> typing.Callable[ + [jobs.SubmitJobRequest], typing.Union[jobs.Job, typing.Awaitable[jobs.Job]] + ]: + raise NotImplementedError() + + @property + def submit_job_as_operation( + self, + ) -> typing.Callable[ + [jobs.SubmitJobRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def get_job( + self, + ) -> typing.Callable[ + 
[jobs.GetJobRequest], typing.Union[jobs.Job, typing.Awaitable[jobs.Job]] + ]: + raise NotImplementedError() + + @property + def list_jobs( + self, + ) -> typing.Callable[ + [jobs.ListJobsRequest], + typing.Union[jobs.ListJobsResponse, typing.Awaitable[jobs.ListJobsResponse]], + ]: + raise NotImplementedError() + + @property + def update_job( + self, + ) -> typing.Callable[ + [jobs.UpdateJobRequest], typing.Union[jobs.Job, typing.Awaitable[jobs.Job]] + ]: + raise NotImplementedError() + + @property + def cancel_job( + self, + ) -> typing.Callable[ + [jobs.CancelJobRequest], typing.Union[jobs.Job, typing.Awaitable[jobs.Job]] + ]: + raise NotImplementedError() + + @property + def delete_job( + self, + ) -> typing.Callable[ + [jobs.DeleteJobRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + +__all__ = ("JobControllerTransport",) diff --git a/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc.py b/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc.py new file mode 100644 index 00000000..19aa92cc --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc.py @@ -0,0 +1,402 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + + +import grpc # type: ignore + +from google.cloud.dataproc_v1beta2.types import jobs +from google.longrunning import operations_pb2 as operations # type: ignore +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import JobControllerTransport + + +class JobControllerGrpcTransport(JobControllerTransport): + """gRPC backend transport for JobController. + + The JobController provides methods to manage jobs. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
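The defaults wired up in _prep_wrapped_messages above (0.1 s initial backoff, 1.3 multiplier, 60 s backoff ceiling, 900 s timeout, retrying only the listed exception types) apply when a call passes no overrides; both retry and timeout can be replaced per call. A sketch with placeholder identifiers:

from google.api_core import retry as retries
from google.cloud.dataproc_v1beta2.services.job_controller import JobControllerClient

client = JobControllerClient()

# Override the wrapped-method defaults for this one call only.
job = client.get_job(
    project_id="my-project",
    region="us-central1",
    job_id="my-job-id",
    retry=retries.Retry(initial=0.25, maximum=30.0, multiplier=2.0, deadline=120.0),
    timeout=60.0,
)
print(job.status.state)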
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If + provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A + callback to provide client SSL certificate bytes and private key + bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` + is None. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + self._stubs = {} # type: Dict[str, Callable] + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + @classmethod + def create_channel( + cls, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + address (Optionsl[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. 
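The channel branch above means an application can hand the transport a pre-built gRPC channel (and then owns its credentials), and the resulting transport instance is accepted by the client constructor. A sketch assuming application default credentials are available for channel creation and placeholder project/region values:

from google.cloud.dataproc_v1beta2.services.job_controller import JobControllerClient
from google.cloud.dataproc_v1beta2.services.job_controller.transports import (
    JobControllerGrpcTransport,
)

# create_channel falls back to application default credentials and the
# cloud-platform scope when none are given explicitly.
channel = JobControllerGrpcTransport.create_channel("dataproc.googleapis.com:443")
transport = JobControllerGrpcTransport(channel=channel)

client = JobControllerClient(transport=transport)
for job in client.list_jobs(project_id="my-project", region="us-central1"):
    print(job.reference.job_id)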
+ This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Sanity check: Only create a new channel if we do not already + # have one. + if not hasattr(self, "_grpc_channel"): + self._grpc_channel = self.create_channel( + self._host, credentials=self._credentials, + ) + + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if "operations_client" not in self.__dict__: + self.__dict__["operations_client"] = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self.__dict__["operations_client"] + + @property + def submit_job(self) -> Callable[[jobs.SubmitJobRequest], jobs.Job]: + r"""Return a callable for the submit job method over gRPC. + + Submits a job to a cluster. + + Returns: + Callable[[~.SubmitJobRequest], + ~.Job]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "submit_job" not in self._stubs: + self._stubs["submit_job"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.JobController/SubmitJob", + request_serializer=jobs.SubmitJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs["submit_job"] + + @property + def submit_job_as_operation( + self, + ) -> Callable[[jobs.SubmitJobRequest], operations.Operation]: + r"""Return a callable for the submit job as operation method over gRPC. + + Submits job to a cluster. + + Returns: + Callable[[~.SubmitJobRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
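The SubmitJob stub above backs the client's submit_job method. A sketch with placeholder project, region, cluster, and Cloud Storage names; proto-plus lets the job be described as a plain dict:

from google.cloud.dataproc_v1beta2.services.job_controller import JobControllerClient

client = JobControllerClient()

job = client.submit_job(
    project_id="my-project",
    region="us-central1",
    job={
        "placement": {"cluster_name": "my-cluster"},
        "pyspark_job": {"main_python_file_uri": "gs://my-bucket/word_count.py"},
    },
)
# SubmitJob returns immediately with the accepted Job resource; its status
# can be polled later with get_job.
print(job.reference.job_id, job.status.state)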
+ if "submit_job_as_operation" not in self._stubs: + self._stubs["submit_job_as_operation"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.JobController/SubmitJobAsOperation", + request_serializer=jobs.SubmitJobRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["submit_job_as_operation"] + + @property + def get_job(self) -> Callable[[jobs.GetJobRequest], jobs.Job]: + r"""Return a callable for the get job method over gRPC. + + Gets the resource representation for a job in a + project. + + Returns: + Callable[[~.GetJobRequest], + ~.Job]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_job" not in self._stubs: + self._stubs["get_job"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.JobController/GetJob", + request_serializer=jobs.GetJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs["get_job"] + + @property + def list_jobs(self) -> Callable[[jobs.ListJobsRequest], jobs.ListJobsResponse]: + r"""Return a callable for the list jobs method over gRPC. + + Lists regions/{region}/jobs in a project. + + Returns: + Callable[[~.ListJobsRequest], + ~.ListJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_jobs" not in self._stubs: + self._stubs["list_jobs"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.JobController/ListJobs", + request_serializer=jobs.ListJobsRequest.serialize, + response_deserializer=jobs.ListJobsResponse.deserialize, + ) + return self._stubs["list_jobs"] + + @property + def update_job(self) -> Callable[[jobs.UpdateJobRequest], jobs.Job]: + r"""Return a callable for the update job method over gRPC. + + Updates a job in a project. + + Returns: + Callable[[~.UpdateJobRequest], + ~.Job]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_job" not in self._stubs: + self._stubs["update_job"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.JobController/UpdateJob", + request_serializer=jobs.UpdateJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs["update_job"] + + @property + def cancel_job(self) -> Callable[[jobs.CancelJobRequest], jobs.Job]: + r"""Return a callable for the cancel job method over gRPC. + + Starts a job cancellation request. To access the job resource + after cancellation, call + `regions/{region}/jobs.list `__ + or + `regions/{region}/jobs.get `__. + + Returns: + Callable[[~.CancelJobRequest], + ~.Job]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
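SubmitJobAsOperation, wired up just above together with the operations_client property, returns a long-running operation instead of the raw Job. A sketch of driving it to completion, again with placeholder names:

from google.cloud.dataproc_v1beta2.services.job_controller import JobControllerClient

client = JobControllerClient()

operation = client.submit_job_as_operation(
    project_id="my-project",
    region="us-central1",
    job={
        "placement": {"cluster_name": "my-cluster"},
        "spark_job": {
            "main_class": "org.apache.spark.examples.SparkPi",
            "jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
            "args": ["1000"],
        },
    },
)

# result() polls the operation via the operations client until the job
# reaches a terminal state, then returns the finished Job.
finished_job = operation.result(timeout=900)
print(finished_job.status.state)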
+ if "cancel_job" not in self._stubs: + self._stubs["cancel_job"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.JobController/CancelJob", + request_serializer=jobs.CancelJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs["cancel_job"] + + @property + def delete_job(self) -> Callable[[jobs.DeleteJobRequest], empty.Empty]: + r"""Return a callable for the delete job method over gRPC. + + Deletes the job from the project. If the job is active, the + delete fails, and the response returns ``FAILED_PRECONDITION``. + + Returns: + Callable[[~.DeleteJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_job" not in self._stubs: + self._stubs["delete_job"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.JobController/DeleteJob", + request_serializer=jobs.DeleteJobRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_job"] + + +__all__ = ("JobControllerGrpcTransport",) diff --git a/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc_asyncio.py b/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc_asyncio.py new file mode 100644 index 00000000..fcc056ab --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/job_controller/transports/grpc_asyncio.py @@ -0,0 +1,397 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.dataproc_v1beta2.types import jobs +from google.longrunning import operations_pb2 as operations # type: ignore +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import JobControllerTransport +from .grpc import JobControllerGrpcTransport + + +class JobControllerGrpcAsyncIOTransport(JobControllerTransport): + """gRPC AsyncIO backend transport for JobController. + + The JobController provides methods to manage jobs. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + address (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If + provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A + callback to provide client SSL certificate bytes and private key + bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` + is None. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. 
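The AsyncIO transport being defined here is what JobControllerAsyncClient uses by default ("grpc_asyncio"); every client method becomes a coroutine. A sketch with placeholder identifiers:

import asyncio

from google.cloud.dataproc_v1beta2.services.job_controller import JobControllerAsyncClient


async def stop_job(project_id: str, region: str, job_id: str) -> None:
    client = JobControllerAsyncClient()
    cancelled = await client.cancel_job(
        project_id=project_id, region=region, job_id=job_id
    )
    print(cancelled.status.state)
    # delete_job would fail with FAILED_PRECONDITION while the job is still
    # active, so deletion usually waits until cancellation has settled.


asyncio.run(stop_job("my-project", "us-central1", "my-job-id"))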
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + self._stubs = {} + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Sanity check: Only create a new channel if we do not already + # have one. + if not hasattr(self, "_grpc_channel"): + self._grpc_channel = self.create_channel( + self._host, credentials=self._credentials, + ) + + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if "operations_client" not in self.__dict__: + self.__dict__["operations_client"] = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self.__dict__["operations_client"] + + @property + def submit_job(self) -> Callable[[jobs.SubmitJobRequest], Awaitable[jobs.Job]]: + r"""Return a callable for the submit job method over gRPC. + + Submits a job to a cluster. + + Returns: + Callable[[~.SubmitJobRequest], + Awaitable[~.Job]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "submit_job" not in self._stubs: + self._stubs["submit_job"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.JobController/SubmitJob", + request_serializer=jobs.SubmitJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs["submit_job"] + + @property + def submit_job_as_operation( + self, + ) -> Callable[[jobs.SubmitJobRequest], Awaitable[operations.Operation]]: + r"""Return a callable for the submit job as operation method over gRPC. + + Submits job to a cluster. + + Returns: + Callable[[~.SubmitJobRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "submit_job_as_operation" not in self._stubs: + self._stubs["submit_job_as_operation"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.JobController/SubmitJobAsOperation", + request_serializer=jobs.SubmitJobRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["submit_job_as_operation"] + + @property + def get_job(self) -> Callable[[jobs.GetJobRequest], Awaitable[jobs.Job]]: + r"""Return a callable for the get job method over gRPC. + + Gets the resource representation for a job in a + project. + + Returns: + Callable[[~.GetJobRequest], + Awaitable[~.Job]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_job" not in self._stubs: + self._stubs["get_job"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.JobController/GetJob", + request_serializer=jobs.GetJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs["get_job"] + + @property + def list_jobs( + self, + ) -> Callable[[jobs.ListJobsRequest], Awaitable[jobs.ListJobsResponse]]: + r"""Return a callable for the list jobs method over gRPC. + + Lists regions/{region}/jobs in a project. + + Returns: + Callable[[~.ListJobsRequest], + Awaitable[~.ListJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_jobs" not in self._stubs: + self._stubs["list_jobs"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.JobController/ListJobs", + request_serializer=jobs.ListJobsRequest.serialize, + response_deserializer=jobs.ListJobsResponse.deserialize, + ) + return self._stubs["list_jobs"] + + @property + def update_job(self) -> Callable[[jobs.UpdateJobRequest], Awaitable[jobs.Job]]: + r"""Return a callable for the update job method over gRPC. + + Updates a job in a project. + + Returns: + Callable[[~.UpdateJobRequest], + Awaitable[~.Job]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_job" not in self._stubs: + self._stubs["update_job"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.JobController/UpdateJob", + request_serializer=jobs.UpdateJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs["update_job"] + + @property + def cancel_job(self) -> Callable[[jobs.CancelJobRequest], Awaitable[jobs.Job]]: + r"""Return a callable for the cancel job method over gRPC. + + Starts a job cancellation request. To access the job resource + after cancellation, call + `regions/{region}/jobs.list `__ + or + `regions/{region}/jobs.get `__. 
+ + Returns: + Callable[[~.CancelJobRequest], + Awaitable[~.Job]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_job" not in self._stubs: + self._stubs["cancel_job"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.JobController/CancelJob", + request_serializer=jobs.CancelJobRequest.serialize, + response_deserializer=jobs.Job.deserialize, + ) + return self._stubs["cancel_job"] + + @property + def delete_job(self) -> Callable[[jobs.DeleteJobRequest], Awaitable[empty.Empty]]: + r"""Return a callable for the delete job method over gRPC. + + Deletes the job from the project. If the job is active, the + delete fails, and the response returns ``FAILED_PRECONDITION``. + + Returns: + Callable[[~.DeleteJobRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_job" not in self._stubs: + self._stubs["delete_job"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.JobController/DeleteJob", + request_serializer=jobs.DeleteJobRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_job"] + + +__all__ = ("JobControllerGrpcAsyncIOTransport",) diff --git a/google/cloud/dataproc_v1beta2/services/workflow_template_service/__init__.py b/google/cloud/dataproc_v1beta2/services/workflow_template_service/__init__.py new file mode 100644 index 00000000..8e92d92d --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/workflow_template_service/__init__.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .client import WorkflowTemplateServiceClient +from .async_client import WorkflowTemplateServiceAsyncClient + +__all__ = ( + "WorkflowTemplateServiceClient", + "WorkflowTemplateServiceAsyncClient", +) diff --git a/google/cloud/dataproc_v1beta2/services/workflow_template_service/async_client.py b/google/cloud/dataproc_v1beta2/services/workflow_template_service/async_client.py new file mode 100644 index 00000000..94e84da0 --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/workflow_template_service/async_client.py @@ -0,0 +1,865 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.api_core import operation +from google.api_core import operation_async +from google.cloud.dataproc_v1beta2.services.workflow_template_service import pagers +from google.cloud.dataproc_v1beta2.types import workflow_templates +from google.protobuf import empty_pb2 as empty # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + +from .transports.base import WorkflowTemplateServiceTransport +from .transports.grpc_asyncio import WorkflowTemplateServiceGrpcAsyncIOTransport +from .client import WorkflowTemplateServiceClient + + +class WorkflowTemplateServiceAsyncClient: + """The API interface for managing Workflow Templates in the + Dataproc API. + """ + + _client: WorkflowTemplateServiceClient + + DEFAULT_ENDPOINT = WorkflowTemplateServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = WorkflowTemplateServiceClient.DEFAULT_MTLS_ENDPOINT + + workflow_template_path = staticmethod( + WorkflowTemplateServiceClient.workflow_template_path + ) + + from_service_account_file = WorkflowTemplateServiceClient.from_service_account_file + from_service_account_json = from_service_account_file + + get_transport_class = functools.partial( + type(WorkflowTemplateServiceClient).get_transport_class, + type(WorkflowTemplateServiceClient), + ) + + def __init__( + self, + *, + credentials: credentials.Credentials = None, + transport: Union[str, WorkflowTemplateServiceTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + ) -> None: + """Instantiate the workflow template service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.WorkflowTemplateServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint, this is the default value for + the environment variable) and "auto" (auto switch to the default + mTLS endpoint if client SSL credentials is present). However, + the ``api_endpoint`` property takes precedence if provided. 
+ (2) The ``client_cert_source`` property is used to provide client + SSL credentials for mutual TLS transport. If not provided, the + default SSL credentials will be used if present. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + + self._client = WorkflowTemplateServiceClient( + credentials=credentials, transport=transport, client_options=client_options, + ) + + async def create_workflow_template( + self, + request: workflow_templates.CreateWorkflowTemplateRequest = None, + *, + parent: str = None, + template: workflow_templates.WorkflowTemplate = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> workflow_templates.WorkflowTemplate: + r"""Creates new workflow template. + + Args: + request (:class:`~.workflow_templates.CreateWorkflowTemplateRequest`): + The request object. A request to create a workflow + template. + parent (:class:`str`): + Required. The resource name of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates,create``, + the resource name of the region has the following + format: ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.workflowTemplates.create``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + template (:class:`~.workflow_templates.WorkflowTemplate`): + Required. The Dataproc workflow + template to create. + This corresponds to the ``template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.workflow_templates.WorkflowTemplate: + A Dataproc workflow template + resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent, template]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = workflow_templates.CreateWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if template is not None: + request.template = template + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_workflow_template, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=600.0, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. 
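The constructor documented above accepts client_options; with Dataproc the most common use is pointing the client at a regional endpoint. A sketch, assuming the standard {region}-dataproc.googleapis.com endpoint pattern and environment-resolved credentials:

from google.api_core.client_options import ClientOptions
from google.cloud.dataproc_v1beta2.services.workflow_template_service import (
    WorkflowTemplateServiceAsyncClient,
)

# Route all calls through the us-central1 regional endpoint instead of the
# global default; credentials are still resolved from the environment.
options = ClientOptions(api_endpoint="us-central1-dataproc.googleapis.com:443")
client = WorkflowTemplateServiceAsyncClient(client_options=options)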
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def get_workflow_template( + self, + request: workflow_templates.GetWorkflowTemplateRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> workflow_templates.WorkflowTemplate: + r"""Retrieves the latest workflow template. + Can retrieve previously instantiated template by + specifying optional version parameter. + + Args: + request (:class:`~.workflow_templates.GetWorkflowTemplateRequest`): + The request object. A request to fetch a workflow + template. + name (:class:`str`): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates.get``, the + resource name of the template has the following + format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates.get``, the + resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.workflow_templates.WorkflowTemplate: + A Dataproc workflow template + resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = workflow_templates.GetWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_workflow_template, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. 
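create_workflow_template and get_workflow_template above expose the same surface on the synchronous WorkflowTemplateServiceClient, which keeps this sketch shorter; project, region, bucket, and label values are placeholders:

from google.cloud.dataproc_v1beta2.services.workflow_template_service import (
    WorkflowTemplateServiceClient,
)

client = WorkflowTemplateServiceClient()
parent = "projects/my-project/regions/us-central1"

created = client.create_workflow_template(
    parent=parent,
    template={
        "id": "nightly-etl",
        "placement": {"cluster_selector": {"cluster_labels": {"env": "prod"}}},
        "jobs": [
            {
                "step_id": "etl",
                "spark_sql_job": {"query_file_uri": "gs://my-bucket/etl.sql"},
            }
        ],
    },
)

fetched = client.get_workflow_template(name=created.name)
print(fetched.id, fetched.version)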
+ return response + + async def instantiate_workflow_template( + self, + request: workflow_templates.InstantiateWorkflowTemplateRequest = None, + *, + name: str = None, + parameters: Sequence[ + workflow_templates.InstantiateWorkflowTemplateRequest.ParametersEntry + ] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Instantiates a template and begins execution. + + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. + + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Args: + request (:class:`~.workflow_templates.InstantiateWorkflowTemplateRequest`): + The request object. A request to instantiate a workflow + template. + name (:class:`str`): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For + ``projects.regions.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For + ``projects.locations.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parameters (:class:`Sequence[~.workflow_templates.InstantiateWorkflowTemplateRequest.ParametersEntry]`): + Optional. Map from parameter names to + values that should be used for those + parameters. Values may not exceed 100 + characters. + This corresponds to the ``parameters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
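A sketch of the instantiation flow described above, with a placeholder template name and parameter map: the returned AsyncOperation resolves to Empty, while progress is surfaced through WorkflowMetadata.

import asyncio

from google.cloud.dataproc_v1beta2.services.workflow_template_service import (
    WorkflowTemplateServiceAsyncClient,
)


async def run_template() -> None:
    client = WorkflowTemplateServiceAsyncClient()
    operation = await client.instantiate_workflow_template(
        name="projects/my-project/regions/us-central1/workflowTemplates/my-template",
        parameters={"INPUT_BUCKET": "gs://my-bucket/input"},
    )
    # Blocks until the whole workflow (cluster plus jobs) has finished.
    await operation.result()


asyncio.run(run_template())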
+ if request is not None and any([name, parameters]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = workflow_templates.InstantiateWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if parameters is not None: + request.parameters = parameters + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.instantiate_workflow_template, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=600.0, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty.Empty, + metadata_type=workflow_templates.WorkflowMetadata, + ) + + # Done; return the response. + return response + + async def instantiate_inline_workflow_template( + self, + request: workflow_templates.InstantiateInlineWorkflowTemplateRequest = None, + *, + parent: str = None, + template: workflow_templates.WorkflowTemplate = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Instantiates a template and begins execution. + + This method is equivalent to executing the sequence + [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], + [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate], + [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate]. + + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. + + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Args: + request (:class:`~.workflow_templates.InstantiateInlineWorkflowTemplateRequest`): + The request object. A request to instantiate an inline + workflow template. + parent (:class:`str`): + Required. The resource name of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. 
+ + - For + ``projects.regions.workflowTemplates,instantiateinline``, + the resource name of the region has the following + format: ``projects/{project_id}/regions/{region}`` + + - For + ``projects.locations.workflowTemplates.instantiateinline``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + template (:class:`~.workflow_templates.WorkflowTemplate`): + Required. The workflow template to + instantiate. + This corresponds to the ``template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent, template]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = workflow_templates.InstantiateInlineWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if template is not None: + request.template = template + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.instantiate_inline_workflow_template, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=600.0, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty.Empty, + metadata_type=workflow_templates.WorkflowMetadata, + ) + + # Done; return the response. 
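With instantiate_inline_workflow_template the template travels with the request, so no stored template is needed. A sketch with a hypothetical single-job template on an ephemeral managed cluster; all names are placeholders:

import asyncio

from google.cloud.dataproc_v1beta2.services.workflow_template_service import (
    WorkflowTemplateServiceAsyncClient,
)

INLINE_TEMPLATE = {
    "id": "adhoc-wordcount",
    "placement": {
        "managed_cluster": {
            "cluster_name": "wordcount-cluster",
            "config": {"gce_cluster_config": {"zone_uri": "us-central1-a"}},
        }
    },
    "jobs": [
        {
            "step_id": "count",
            "pyspark_job": {"main_python_file_uri": "gs://my-bucket/word_count.py"},
        }
    ],
}


async def main() -> None:
    client = WorkflowTemplateServiceAsyncClient()
    operation = await client.instantiate_inline_workflow_template(
        parent="projects/my-project/regions/us-central1",
        template=INLINE_TEMPLATE,
    )
    # The managed cluster is created, runs the job, and is deleted again;
    # the operation result is Empty on success.
    await operation.result()


asyncio.run(main())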
+ return response + + async def update_workflow_template( + self, + request: workflow_templates.UpdateWorkflowTemplateRequest = None, + *, + template: workflow_templates.WorkflowTemplate = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> workflow_templates.WorkflowTemplate: + r"""Updates (replaces) workflow template. The updated + template must contain version that matches the current + server version. + + Args: + request (:class:`~.workflow_templates.UpdateWorkflowTemplateRequest`): + The request object. A request to update a workflow + template. + template (:class:`~.workflow_templates.WorkflowTemplate`): + Required. The updated workflow template. + + The ``template.version`` field must match the current + version. + This corresponds to the ``template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.workflow_templates.WorkflowTemplate: + A Dataproc workflow template + resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([template]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = workflow_templates.UpdateWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if template is not None: + request.template = template + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_workflow_template, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=600.0, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("template.name", request.template.name),) + ), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + async def list_workflow_templates( + self, + request: workflow_templates.ListWorkflowTemplatesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListWorkflowTemplatesAsyncPager: + r"""Lists workflows that match the specified filter in + the request. + + Args: + request (:class:`~.workflow_templates.ListWorkflowTemplatesRequest`): + The request object. A request to list workflow templates + in a project. + parent (:class:`str`): + Required. The resource name of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. 
+ + - For ``projects.regions.workflowTemplates,list``, the + resource name of the region has the following format: + ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.workflowTemplates.list``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListWorkflowTemplatesAsyncPager: + A response to a request to list + workflow templates in a project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([parent]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = workflow_templates.ListWorkflowTemplatesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_workflow_templates, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListWorkflowTemplatesAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_workflow_template( + self, + request: workflow_templates.DeleteWorkflowTemplateRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a workflow template. It does not cancel in- + rogress workflows. + + Args: + request (:class:`~.workflow_templates.DeleteWorkflowTemplateRequest`): + The request object. A request to delete a workflow + template. + Currently started workflows will remain running. + name (:class:`str`): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. 
+ + - For ``projects.regions.workflowTemplates.delete``, + the resource name of the template has the following + format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For + ``projects.locations.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + if request is not None and any([name]): + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = workflow_templates.DeleteWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_workflow_template, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=600.0, + client_info=_client_info, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + await rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + +try: + _client_info = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-dataproc",).version, + ) +except pkg_resources.DistributionNotFound: + _client_info = gapic_v1.client_info.ClientInfo() + + +__all__ = ("WorkflowTemplateServiceAsyncClient",) diff --git a/google/cloud/dataproc_v1beta2/services/workflow_template_service/client.py b/google/cloud/dataproc_v1beta2/services/workflow_template_service/client.py new file mode 100644 index 00000000..5c529342 --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/workflow_template_service/client.py @@ -0,0 +1,980 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+from collections import OrderedDict
+import os
+import re
+from typing import Callable, Dict, Sequence, Tuple, Type, Union
+import pkg_resources
+
+import google.api_core.client_options as ClientOptions  # type: ignore
+from google.api_core import exceptions  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import retry as retries  # type: ignore
+from google.auth import credentials  # type: ignore
+from google.auth.transport import mtls  # type: ignore
+from google.auth.exceptions import MutualTLSChannelError  # type: ignore
+from google.oauth2 import service_account  # type: ignore
+
+from google.api_core import operation
+from google.api_core import operation_async
+from google.cloud.dataproc_v1beta2.services.workflow_template_service import pagers
+from google.cloud.dataproc_v1beta2.types import workflow_templates
+from google.protobuf import empty_pb2 as empty  # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp  # type: ignore
+
+from .transports.base import WorkflowTemplateServiceTransport
+from .transports.grpc import WorkflowTemplateServiceGrpcTransport
+from .transports.grpc_asyncio import WorkflowTemplateServiceGrpcAsyncIOTransport
+
+
+class WorkflowTemplateServiceClientMeta(type):
+    """Metaclass for the WorkflowTemplateService client.
+
+    This provides class-level methods for building and retrieving
+    support objects (e.g. transport) without polluting the client instance
+    objects.
+    """
+
+    _transport_registry = (
+        OrderedDict()
+    )  # type: Dict[str, Type[WorkflowTemplateServiceTransport]]
+    _transport_registry["grpc"] = WorkflowTemplateServiceGrpcTransport
+    _transport_registry["grpc_asyncio"] = WorkflowTemplateServiceGrpcAsyncIOTransport
+
+    def get_transport_class(
+        cls, label: str = None,
+    ) -> Type[WorkflowTemplateServiceTransport]:
+        """Return an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class WorkflowTemplateServiceClient(metaclass=WorkflowTemplateServiceClientMeta):
+    """The API interface for managing Workflow Templates in the
+    Dataproc API.
+    """
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Convert api endpoint to mTLS endpoint.
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "dataproc.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            {@api.name}: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @staticmethod
+    def workflow_template_path(
+        project: str, region: str, workflow_template: str,
+    ) -> str:
+        """Return a fully-qualified workflow_template string."""
+        return "projects/{project}/regions/{region}/workflowTemplates/{workflow_template}".format(
+            project=project, region=region, workflow_template=workflow_template,
+        )
+
+    @staticmethod
+    def parse_workflow_template_path(path: str) -> Dict[str, str]:
+        """Parse a workflow_template path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/regions/(?P<region>.+?)/workflowTemplates/(?P<workflow_template>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    def __init__(
+        self,
+        *,
+        credentials: credentials.Credentials = None,
+        transport: Union[str, WorkflowTemplateServiceTransport] = None,
+        client_options: ClientOptions = None,
+    ) -> None:
+        """Instantiate the workflow template service client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, ~.WorkflowTemplateServiceTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (ClientOptions): Custom options for the client. It
+                won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint, this is the default value for
+                the environment variable) and "auto" (auto switch to the default
+                mTLS endpoint if client SSL credentials is present). However,
+                the ``api_endpoint`` property takes precedence if provided.
+                (2) The ``client_cert_source`` property is used to provide client
+                SSL credentials for mutual TLS transport. If not provided, the
+                default SSL credentials will be used if present.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
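+
+        Example (an illustrative sketch only; assumes application default
+        credentials are available in the environment):
+
+            client = WorkflowTemplateServiceClient()
+
+            # or, overriding the endpoint via client options:
+            client = WorkflowTemplateServiceClient(
+                client_options={"api_endpoint": "dataproc.googleapis.com:443"}
+            )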
+ """ + if isinstance(client_options, dict): + client_options = ClientOptions.from_dict(client_options) + if client_options is None: + client_options = ClientOptions.ClientOptions() + + if client_options.api_endpoint is None: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") + if use_mtls_env == "never": + client_options.api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + has_client_cert_source = ( + client_options.client_cert_source is not None + or mtls.has_default_client_cert_source() + ) + client_options.api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT + if has_client_cert_source + else self.DEFAULT_ENDPOINT + ) + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, WorkflowTemplateServiceTransport): + # transport is a WorkflowTemplateServiceTransport instance. + if credentials or client_options.credentials_file: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, " + "provide its scopes directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=client_options.api_endpoint, + scopes=client_options.scopes, + api_mtls_endpoint=client_options.api_endpoint, + client_cert_source=client_options.client_cert_source, + quota_project_id=client_options.quota_project_id, + ) + + def create_workflow_template( + self, + request: workflow_templates.CreateWorkflowTemplateRequest = None, + *, + parent: str = None, + template: workflow_templates.WorkflowTemplate = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> workflow_templates.WorkflowTemplate: + r"""Creates new workflow template. + + Args: + request (:class:`~.workflow_templates.CreateWorkflowTemplateRequest`): + The request object. A request to create a workflow + template. + parent (:class:`str`): + Required. The resource name of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates,create``, + the resource name of the region has the following + format: ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.workflowTemplates.create``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + template (:class:`~.workflow_templates.WorkflowTemplate`): + Required. The Dataproc workflow + template to create. + This corresponds to the ``template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
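+
+        Example (an illustrative sketch; the resource names are placeholders
+        and a real template also needs ``placement`` and ``jobs``):
+
+            client.create_workflow_template(
+                parent="projects/my-project/regions/us-central1",
+                template=workflow_templates.WorkflowTemplate(id="my-template"),
+            )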
+ + Returns: + ~.workflow_templates.WorkflowTemplate: + A Dataproc workflow template + resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, template]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a workflow_templates.CreateWorkflowTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, workflow_templates.CreateWorkflowTemplateRequest): + request = workflow_templates.CreateWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + if template is not None: + request.template = template + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_workflow_template] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def get_workflow_template( + self, + request: workflow_templates.GetWorkflowTemplateRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> workflow_templates.WorkflowTemplate: + r"""Retrieves the latest workflow template. + Can retrieve previously instantiated template by + specifying optional version parameter. + + Args: + request (:class:`~.workflow_templates.GetWorkflowTemplateRequest`): + The request object. A request to fetch a workflow + template. + name (:class:`str`): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates.get``, the + resource name of the template has the following + format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates.get``, the + resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.workflow_templates.WorkflowTemplate: + A Dataproc workflow template + resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
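+        # For example (hypothetical resource name), either of these is valid:
+        #     client.get_workflow_template(
+        #         name="projects/my-project/regions/us-central1/workflowTemplates/my-template")
+        #     client.get_workflow_template(
+        #         request=workflow_templates.GetWorkflowTemplateRequest(name=...))
+        # but passing both ``request`` and ``name`` raises a ValueError below.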
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a workflow_templates.GetWorkflowTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, workflow_templates.GetWorkflowTemplateRequest): + request = workflow_templates.GetWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_workflow_template] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def instantiate_workflow_template( + self, + request: workflow_templates.InstantiateWorkflowTemplateRequest = None, + *, + name: str = None, + parameters: Sequence[ + workflow_templates.InstantiateWorkflowTemplateRequest.ParametersEntry + ] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Instantiates a template and begins execution. + + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. + + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Args: + request (:class:`~.workflow_templates.InstantiateWorkflowTemplateRequest`): + The request object. A request to instantiate a workflow + template. + name (:class:`str`): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For + ``projects.regions.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For + ``projects.locations.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parameters (:class:`Sequence[~.workflow_templates.InstantiateWorkflowTemplateRequest.ParametersEntry]`): + Optional. Map from parameter names to + values that should be used for those + parameters. Values may not exceed 100 + characters. 
+ This corresponds to the ``parameters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, parameters]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a workflow_templates.InstantiateWorkflowTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, workflow_templates.InstantiateWorkflowTemplateRequest + ): + request = workflow_templates.InstantiateWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + if parameters is not None: + request.parameters = parameters + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.instantiate_workflow_template + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty.Empty, + metadata_type=workflow_templates.WorkflowMetadata, + ) + + # Done; return the response. + return response + + def instantiate_inline_workflow_template( + self, + request: workflow_templates.InstantiateInlineWorkflowTemplateRequest = None, + *, + parent: str = None, + template: workflow_templates.WorkflowTemplate = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Instantiates a template and begins execution. + + This method is equivalent to executing the sequence + [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], + [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate], + [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate]. 
+ + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. + + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Args: + request (:class:`~.workflow_templates.InstantiateInlineWorkflowTemplateRequest`): + The request object. A request to instantiate an inline + workflow template. + parent (:class:`str`): + Required. The resource name of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For + ``projects.regions.workflowTemplates,instantiateinline``, + the resource name of the region has the following + format: ``projects/{project_id}/regions/{region}`` + + - For + ``projects.locations.workflowTemplates.instantiateinline``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + template (:class:`~.workflow_templates.WorkflowTemplate`): + Required. The workflow template to + instantiate. + This corresponds to the ``template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:``~.empty.Empty``: A generic empty message that + you can re-use to avoid defining duplicated empty + messages in your APIs. A typical example is to use it as + the request or the response type of an API method. For + instance: + + :: + + service Foo { + rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); + } + + The JSON representation for ``Empty`` is empty JSON + object ``{}``. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, template]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a workflow_templates.InstantiateInlineWorkflowTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, workflow_templates.InstantiateInlineWorkflowTemplateRequest + ): + request = workflow_templates.InstantiateInlineWorkflowTemplateRequest( + request + ) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if parent is not None: + request.parent = parent + if template is not None: + request.template = template + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.instantiate_inline_workflow_template + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + empty.Empty, + metadata_type=workflow_templates.WorkflowMetadata, + ) + + # Done; return the response. + return response + + def update_workflow_template( + self, + request: workflow_templates.UpdateWorkflowTemplateRequest = None, + *, + template: workflow_templates.WorkflowTemplate = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> workflow_templates.WorkflowTemplate: + r"""Updates (replaces) workflow template. The updated + template must contain version that matches the current + server version. + + Args: + request (:class:`~.workflow_templates.UpdateWorkflowTemplateRequest`): + The request object. A request to update a workflow + template. + template (:class:`~.workflow_templates.WorkflowTemplate`): + Required. The updated workflow template. + + The ``template.version`` field must match the current + version. + This corresponds to the ``template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.workflow_templates.WorkflowTemplate: + A Dataproc workflow template + resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([template]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a workflow_templates.UpdateWorkflowTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, workflow_templates.UpdateWorkflowTemplateRequest): + request = workflow_templates.UpdateWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if template is not None: + request.template = template + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_workflow_template] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("template.name", request.template.name),) + ), + ) + + # Send the request. 
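+        # Note that the service rejects the update unless
+        # ``request.template.version`` matches the current server version.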
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Done; return the response. + return response + + def list_workflow_templates( + self, + request: workflow_templates.ListWorkflowTemplatesRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListWorkflowTemplatesPager: + r"""Lists workflows that match the specified filter in + the request. + + Args: + request (:class:`~.workflow_templates.ListWorkflowTemplatesRequest`): + The request object. A request to list workflow templates + in a project. + parent (:class:`str`): + Required. The resource name of the region or location, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates,list``, the + resource name of the region has the following format: + ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.workflowTemplates.list``, + the resource name of the location has the following + format: + ``projects/{project_id}/locations/{location}`` + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.pagers.ListWorkflowTemplatesPager: + A response to a request to list + workflow templates in a project. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a workflow_templates.ListWorkflowTemplatesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, workflow_templates.ListWorkflowTemplatesRequest): + request = workflow_templates.ListWorkflowTemplatesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_workflow_templates] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListWorkflowTemplatesPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + + # Done; return the response. 
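+        # For example (hypothetical parent value), callers can iterate the
+        # pager directly and additional pages are fetched lazily:
+        #     for template in client.list_workflow_templates(
+        #             parent="projects/my-project/regions/us-central1"):
+        #         ...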
+ return response + + def delete_workflow_template( + self, + request: workflow_templates.DeleteWorkflowTemplateRequest = None, + *, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a workflow template. It does not cancel in- + rogress workflows. + + Args: + request (:class:`~.workflow_templates.DeleteWorkflowTemplateRequest`): + The request object. A request to delete a workflow + template. + Currently started workflows will remain running. + name (:class:`str`): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates.delete``, + the resource name of the template has the following + format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For + ``projects.locations.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a workflow_templates.DeleteWorkflowTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, workflow_templates.DeleteWorkflowTemplateRequest): + request = workflow_templates.DeleteWorkflowTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_workflow_template] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. 
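+        # DeleteWorkflowTemplate returns google.protobuf.Empty, so nothing is
+        # returned to the caller; workflows already started from the template
+        # keep running.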
+ rpc( + request, retry=retry, timeout=timeout, metadata=metadata, + ) + + +try: + _client_info = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-dataproc",).version, + ) +except pkg_resources.DistributionNotFound: + _client_info = gapic_v1.client_info.ClientInfo() + + +__all__ = ("WorkflowTemplateServiceClient",) diff --git a/google/cloud/dataproc_v1beta2/services/workflow_template_service/pagers.py b/google/cloud/dataproc_v1beta2/services/workflow_template_service/pagers.py new file mode 100644 index 00000000..205f2657 --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/workflow_template_service/pagers.py @@ -0,0 +1,152 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple + +from google.cloud.dataproc_v1beta2.types import workflow_templates + + +class ListWorkflowTemplatesPager: + """A pager for iterating through ``list_workflow_templates`` requests. + + This class thinly wraps an initial + :class:`~.workflow_templates.ListWorkflowTemplatesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``templates`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListWorkflowTemplates`` requests and continue to iterate + through the ``templates`` field on the + corresponding responses. + + All the usual :class:`~.workflow_templates.ListWorkflowTemplatesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., workflow_templates.ListWorkflowTemplatesResponse], + request: workflow_templates.ListWorkflowTemplatesRequest, + response: workflow_templates.ListWorkflowTemplatesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.workflow_templates.ListWorkflowTemplatesRequest`): + The initial request object. + response (:class:`~.workflow_templates.ListWorkflowTemplatesResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
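+
+        In normal use this pager is created by
+        ``WorkflowTemplateServiceClient.list_workflow_templates`` rather than
+        constructed directly; iterating it yields ``WorkflowTemplate`` items
+        and transparently requests further pages.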
+ """ + self._method = method + self._request = workflow_templates.ListWorkflowTemplatesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[workflow_templates.ListWorkflowTemplatesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[workflow_templates.WorkflowTemplate]: + for page in self.pages: + yield from page.templates + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListWorkflowTemplatesAsyncPager: + """A pager for iterating through ``list_workflow_templates`` requests. + + This class thinly wraps an initial + :class:`~.workflow_templates.ListWorkflowTemplatesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``templates`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListWorkflowTemplates`` requests and continue to iterate + through the ``templates`` field on the + corresponding responses. + + All the usual :class:`~.workflow_templates.ListWorkflowTemplatesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[workflow_templates.ListWorkflowTemplatesResponse] + ], + request: workflow_templates.ListWorkflowTemplatesRequest, + response: workflow_templates.ListWorkflowTemplatesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (:class:`~.workflow_templates.ListWorkflowTemplatesRequest`): + The initial request object. + response (:class:`~.workflow_templates.ListWorkflowTemplatesResponse`): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
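+
+        In normal use this pager is returned by the async client's
+        ``list_workflow_templates`` method; iterate it with ``async for`` to
+        resolve additional pages automatically.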
+ """ + self._method = method + self._request = workflow_templates.ListWorkflowTemplatesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterable[workflow_templates.ListWorkflowTemplatesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[workflow_templates.WorkflowTemplate]: + async def async_generator(): + async for page in self.pages: + for response in page.templates: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/__init__.py b/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/__init__.py new file mode 100644 index 00000000..eb32b364 --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/__init__.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from collections import OrderedDict +from typing import Dict, Type + +from .base import WorkflowTemplateServiceTransport +from .grpc import WorkflowTemplateServiceGrpcTransport +from .grpc_asyncio import WorkflowTemplateServiceGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = ( + OrderedDict() +) # type: Dict[str, Type[WorkflowTemplateServiceTransport]] +_transport_registry["grpc"] = WorkflowTemplateServiceGrpcTransport +_transport_registry["grpc_asyncio"] = WorkflowTemplateServiceGrpcAsyncIOTransport + + +__all__ = ( + "WorkflowTemplateServiceTransport", + "WorkflowTemplateServiceGrpcTransport", + "WorkflowTemplateServiceGrpcAsyncIOTransport", +) diff --git a/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/base.py b/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/base.py new file mode 100644 index 00000000..3dc6c0ad --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/base.py @@ -0,0 +1,272 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import abc +import typing +import pkg_resources + +from google import auth +from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials # type: ignore + +from google.cloud.dataproc_v1beta2.types import workflow_templates +from google.longrunning import operations_pb2 as operations # type: ignore +from google.protobuf import empty_pb2 as empty # type: ignore + + +try: + _client_info = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-dataproc",).version, + ) +except pkg_resources.DistributionNotFound: + _client_info = gapic_v1.client_info.ClientInfo() + + +class WorkflowTemplateServiceTransport(abc.ABC): + """Abstract transport class for WorkflowTemplateService.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + def __init__( + self, + *, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: typing.Optional[str] = None, + scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scope (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = auth.load_credentials_from_file( + credentials_file, scopes=scopes, quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = auth.default( + scopes=scopes, quota_project_id=quota_project_id + ) + + # Save the credentials. + self._credentials = credentials + + # Lifted into its own function so it can be stubbed out during tests. + self._prep_wrapped_messages() + + def _prep_wrapped_messages(self): + # Precompute the wrapped methods. 
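+        # In summary: every RPC below is retried on UNAVAILABLE with
+        # exponential backoff (0.1s initial delay, 60s cap, 1.3 multiplier)
+        # and carries a 600s default timeout; the read-only RPCs (get/list)
+        # additionally retry INTERNAL and DEADLINE_EXCEEDED.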
+ self._wrapped_methods = { + self.create_workflow_template: gapic_v1.method.wrap_method( + self.create_workflow_template, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=600.0, + client_info=_client_info, + ), + self.get_workflow_template: gapic_v1.method.wrap_method( + self.get_workflow_template, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=_client_info, + ), + self.instantiate_workflow_template: gapic_v1.method.wrap_method( + self.instantiate_workflow_template, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=600.0, + client_info=_client_info, + ), + self.instantiate_inline_workflow_template: gapic_v1.method.wrap_method( + self.instantiate_inline_workflow_template, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=600.0, + client_info=_client_info, + ), + self.update_workflow_template: gapic_v1.method.wrap_method( + self.update_workflow_template, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=600.0, + client_info=_client_info, + ), + self.list_workflow_templates: gapic_v1.method.wrap_method( + self.list_workflow_templates, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.InternalServerError, + exceptions.ServiceUnavailable, + exceptions.DeadlineExceeded, + ), + ), + default_timeout=600.0, + client_info=_client_info, + ), + self.delete_workflow_template: gapic_v1.method.wrap_method( + self.delete_workflow_template, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type(exceptions.ServiceUnavailable,), + ), + default_timeout=600.0, + client_info=_client_info, + ), + } + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_workflow_template( + self, + ) -> typing.Callable[ + [workflow_templates.CreateWorkflowTemplateRequest], + typing.Union[ + workflow_templates.WorkflowTemplate, + typing.Awaitable[workflow_templates.WorkflowTemplate], + ], + ]: + raise NotImplementedError() + + @property + def get_workflow_template( + self, + ) -> typing.Callable[ + [workflow_templates.GetWorkflowTemplateRequest], + typing.Union[ + workflow_templates.WorkflowTemplate, + typing.Awaitable[workflow_templates.WorkflowTemplate], + ], + ]: + raise NotImplementedError() + + @property + def instantiate_workflow_template( + self, + ) -> typing.Callable[ + [workflow_templates.InstantiateWorkflowTemplateRequest], + typing.Union[operations.Operation, typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def instantiate_inline_workflow_template( + self, + ) -> typing.Callable[ + [workflow_templates.InstantiateInlineWorkflowTemplateRequest], + typing.Union[operations.Operation, 
typing.Awaitable[operations.Operation]], + ]: + raise NotImplementedError() + + @property + def update_workflow_template( + self, + ) -> typing.Callable[ + [workflow_templates.UpdateWorkflowTemplateRequest], + typing.Union[ + workflow_templates.WorkflowTemplate, + typing.Awaitable[workflow_templates.WorkflowTemplate], + ], + ]: + raise NotImplementedError() + + @property + def list_workflow_templates( + self, + ) -> typing.Callable[ + [workflow_templates.ListWorkflowTemplatesRequest], + typing.Union[ + workflow_templates.ListWorkflowTemplatesResponse, + typing.Awaitable[workflow_templates.ListWorkflowTemplatesResponse], + ], + ]: + raise NotImplementedError() + + @property + def delete_workflow_template( + self, + ) -> typing.Callable[ + [workflow_templates.DeleteWorkflowTemplateRequest], + typing.Union[empty.Empty, typing.Awaitable[empty.Empty]], + ]: + raise NotImplementedError() + + +__all__ = ("WorkflowTemplateServiceTransport",) diff --git a/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc.py b/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc.py new file mode 100644 index 00000000..6ab10372 --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc.py @@ -0,0 +1,482 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import operations_v1 # type: ignore +from google import auth # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + + +import grpc # type: ignore + +from google.cloud.dataproc_v1beta2.types import workflow_templates +from google.longrunning import operations_pb2 as operations # type: ignore +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import WorkflowTemplateServiceTransport + + +class WorkflowTemplateServiceGrpcTransport(WorkflowTemplateServiceTransport): + """gRPC backend transport for WorkflowTemplateService. + + The API interface for managing Workflow Templates in the + Dataproc API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. 
+ credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If + provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A + callback to provide client SSL certificate bytes and private key + bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` + is None. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + self._stubs = {} # type: Dict[str, Callable] + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + @classmethod + def create_channel( + cls, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + address (Optionsl[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Sanity check: Only create a new channel if we do not already + # have one. + if not hasattr(self, "_grpc_channel"): + self._grpc_channel = self.create_channel( + self._host, credentials=self._credentials, + ) + + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if "operations_client" not in self.__dict__: + self.__dict__["operations_client"] = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self.__dict__["operations_client"] + + @property + def create_workflow_template( + self, + ) -> Callable[ + [workflow_templates.CreateWorkflowTemplateRequest], + workflow_templates.WorkflowTemplate, + ]: + r"""Return a callable for the create workflow template method over gRPC. + + Creates new workflow template. + + Returns: + Callable[[~.CreateWorkflowTemplateRequest], + ~.WorkflowTemplate]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_workflow_template" not in self._stubs: + self._stubs["create_workflow_template"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/CreateWorkflowTemplate", + request_serializer=workflow_templates.CreateWorkflowTemplateRequest.serialize, + response_deserializer=workflow_templates.WorkflowTemplate.deserialize, + ) + return self._stubs["create_workflow_template"] + + @property + def get_workflow_template( + self, + ) -> Callable[ + [workflow_templates.GetWorkflowTemplateRequest], + workflow_templates.WorkflowTemplate, + ]: + r"""Return a callable for the get workflow template method over gRPC. + + Retrieves the latest workflow template. + Can retrieve previously instantiated template by + specifying optional version parameter. 
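[Editor's note, not part of the patch] The ``channel`` and ``create_channel`` plumbing documented above also allows a pre-built gRPC channel to be injected, for example in tests. A hedged sketch only; the endpoint string is the service default and the import paths mirror the modules added in this patch::

    # Hedged sketch: inject a pre-built channel into the gRPC transport, then
    # hand the transport to the generated client. Nothing here is prescriptive.
    from google.cloud.dataproc_v1beta2 import WorkflowTemplateServiceClient
    from google.cloud.dataproc_v1beta2.services.workflow_template_service.transports import (
        WorkflowTemplateServiceGrpcTransport,
    )

    # create_channel falls back to application default credentials when none
    # are supplied explicitly.
    channel = WorkflowTemplateServiceGrpcTransport.create_channel(
        "dataproc.googleapis.com:443"
    )
    transport = WorkflowTemplateServiceGrpcTransport(channel=channel)
    client = WorkflowTemplateServiceClient(transport=transport)

When a channel is passed this way, the credentials arguments are ignored, as the constructor's sanity check above describes.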
+ + Returns: + Callable[[~.GetWorkflowTemplateRequest], + ~.WorkflowTemplate]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_workflow_template" not in self._stubs: + self._stubs["get_workflow_template"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/GetWorkflowTemplate", + request_serializer=workflow_templates.GetWorkflowTemplateRequest.serialize, + response_deserializer=workflow_templates.WorkflowTemplate.deserialize, + ) + return self._stubs["get_workflow_template"] + + @property + def instantiate_workflow_template( + self, + ) -> Callable[ + [workflow_templates.InstantiateWorkflowTemplateRequest], operations.Operation + ]: + r"""Return a callable for the instantiate workflow template method over gRPC. + + Instantiates a template and begins execution. + + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. + + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Returns: + Callable[[~.InstantiateWorkflowTemplateRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "instantiate_workflow_template" not in self._stubs: + self._stubs[ + "instantiate_workflow_template" + ] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/InstantiateWorkflowTemplate", + request_serializer=workflow_templates.InstantiateWorkflowTemplateRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["instantiate_workflow_template"] + + @property + def instantiate_inline_workflow_template( + self, + ) -> Callable[ + [workflow_templates.InstantiateInlineWorkflowTemplateRequest], + operations.Operation, + ]: + r"""Return a callable for the instantiate inline workflow + template method over gRPC. + + Instantiates a template and begins execution. + + This method is equivalent to executing the sequence + [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], + [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate], + [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate]. + + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. 
+ + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. + + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Returns: + Callable[[~.InstantiateInlineWorkflowTemplateRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "instantiate_inline_workflow_template" not in self._stubs: + self._stubs[ + "instantiate_inline_workflow_template" + ] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/InstantiateInlineWorkflowTemplate", + request_serializer=workflow_templates.InstantiateInlineWorkflowTemplateRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["instantiate_inline_workflow_template"] + + @property + def update_workflow_template( + self, + ) -> Callable[ + [workflow_templates.UpdateWorkflowTemplateRequest], + workflow_templates.WorkflowTemplate, + ]: + r"""Return a callable for the update workflow template method over gRPC. + + Updates (replaces) workflow template. The updated + template must contain version that matches the current + server version. + + Returns: + Callable[[~.UpdateWorkflowTemplateRequest], + ~.WorkflowTemplate]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_workflow_template" not in self._stubs: + self._stubs["update_workflow_template"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/UpdateWorkflowTemplate", + request_serializer=workflow_templates.UpdateWorkflowTemplateRequest.serialize, + response_deserializer=workflow_templates.WorkflowTemplate.deserialize, + ) + return self._stubs["update_workflow_template"] + + @property + def list_workflow_templates( + self, + ) -> Callable[ + [workflow_templates.ListWorkflowTemplatesRequest], + workflow_templates.ListWorkflowTemplatesResponse, + ]: + r"""Return a callable for the list workflow templates method over gRPC. + + Lists workflows that match the specified filter in + the request. + + Returns: + Callable[[~.ListWorkflowTemplatesRequest], + ~.ListWorkflowTemplatesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_workflow_templates" not in self._stubs: + self._stubs["list_workflow_templates"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/ListWorkflowTemplates", + request_serializer=workflow_templates.ListWorkflowTemplatesRequest.serialize, + response_deserializer=workflow_templates.ListWorkflowTemplatesResponse.deserialize, + ) + return self._stubs["list_workflow_templates"] + + @property + def delete_workflow_template( + self, + ) -> Callable[[workflow_templates.DeleteWorkflowTemplateRequest], empty.Empty]: + r"""Return a callable for the delete workflow template method over gRPC. + + Deletes a workflow template. It does not cancel in- + rogress workflows. + + Returns: + Callable[[~.DeleteWorkflowTemplateRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_workflow_template" not in self._stubs: + self._stubs["delete_workflow_template"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/DeleteWorkflowTemplate", + request_serializer=workflow_templates.DeleteWorkflowTemplateRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_workflow_template"] + + +__all__ = ("WorkflowTemplateServiceGrpcTransport",) diff --git a/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc_asyncio.py b/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc_asyncio.py new file mode 100644 index 00000000..d085b7b1 --- /dev/null +++ b/google/cloud/dataproc_v1beta2/services/workflow_template_service/transports/grpc_asyncio.py @@ -0,0 +1,478 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple + +from google.api_core import grpc_helpers_async # type: ignore +from google.api_core import operations_v1 # type: ignore +from google.auth import credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.dataproc_v1beta2.types import workflow_templates +from google.longrunning import operations_pb2 as operations # type: ignore +from google.protobuf import empty_pb2 as empty # type: ignore + +from .base import WorkflowTemplateServiceTransport +from .grpc import WorkflowTemplateServiceGrpcTransport + + +class WorkflowTemplateServiceGrpcAsyncIOTransport(WorkflowTemplateServiceTransport): + """gRPC AsyncIO backend transport for WorkflowTemplateService. + + The API interface for managing Workflow Templates in the + Dataproc API. 
+ + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + address (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + scopes = scopes or cls.AUTH_SCOPES + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "dataproc.googleapis.com", + credentials: credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If + provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. 
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A + callback to provide client SSL certificate bytes and private key + bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` + is None. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + if channel: + # Sanity check: Ensure that channel and credentials are not both + # provided. + credentials = False + + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + elif api_mtls_endpoint: + host = ( + api_mtls_endpoint + if ":" in api_mtls_endpoint + else api_mtls_endpoint + ":443" + ) + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + ssl_credentials = SslCredentials().ssl_credentials + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + # Run the base constructor. + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + self._stubs = {} + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Sanity check: Only create a new channel if we do not already + # have one. + if not hasattr(self, "_grpc_channel"): + self._grpc_channel = self.create_channel( + self._host, credentials=self._credentials, + ) + + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Sanity check: Only create a new client if we do not already have one. + if "operations_client" not in self.__dict__: + self.__dict__["operations_client"] = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self.__dict__["operations_client"] + + @property + def create_workflow_template( + self, + ) -> Callable[ + [workflow_templates.CreateWorkflowTemplateRequest], + Awaitable[workflow_templates.WorkflowTemplate], + ]: + r"""Return a callable for the create workflow template method over gRPC. + + Creates new workflow template. + + Returns: + Callable[[~.CreateWorkflowTemplateRequest], + Awaitable[~.WorkflowTemplate]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_workflow_template" not in self._stubs: + self._stubs["create_workflow_template"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/CreateWorkflowTemplate", + request_serializer=workflow_templates.CreateWorkflowTemplateRequest.serialize, + response_deserializer=workflow_templates.WorkflowTemplate.deserialize, + ) + return self._stubs["create_workflow_template"] + + @property + def get_workflow_template( + self, + ) -> Callable[ + [workflow_templates.GetWorkflowTemplateRequest], + Awaitable[workflow_templates.WorkflowTemplate], + ]: + r"""Return a callable for the get workflow template method over gRPC. + + Retrieves the latest workflow template. + Can retrieve previously instantiated template by + specifying optional version parameter. + + Returns: + Callable[[~.GetWorkflowTemplateRequest], + Awaitable[~.WorkflowTemplate]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_workflow_template" not in self._stubs: + self._stubs["get_workflow_template"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/GetWorkflowTemplate", + request_serializer=workflow_templates.GetWorkflowTemplateRequest.serialize, + response_deserializer=workflow_templates.WorkflowTemplate.deserialize, + ) + return self._stubs["get_workflow_template"] + + @property + def instantiate_workflow_template( + self, + ) -> Callable[ + [workflow_templates.InstantiateWorkflowTemplateRequest], + Awaitable[operations.Operation], + ]: + r"""Return a callable for the instantiate workflow template method over gRPC. + + Instantiates a template and begins execution. + + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. + + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Returns: + Callable[[~.InstantiateWorkflowTemplateRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "instantiate_workflow_template" not in self._stubs: + self._stubs[ + "instantiate_workflow_template" + ] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/InstantiateWorkflowTemplate", + request_serializer=workflow_templates.InstantiateWorkflowTemplateRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["instantiate_workflow_template"] + + @property + def instantiate_inline_workflow_template( + self, + ) -> Callable[ + [workflow_templates.InstantiateInlineWorkflowTemplateRequest], + Awaitable[operations.Operation], + ]: + r"""Return a callable for the instantiate inline workflow + template method over gRPC. + + Instantiates a template and begins execution. + + This method is equivalent to executing the sequence + [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], + [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate], + [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate]. + + The returned Operation can be used to track execution of + workflow by polling + [operations.get][google.longrunning.Operations.GetOperation]. + The Operation will complete when entire workflow is finished. + + The running workflow can be aborted via + [operations.cancel][google.longrunning.Operations.CancelOperation]. + This will cause any inflight jobs to be cancelled and + workflow-owned clusters to be deleted. + + The [Operation.metadata][google.longrunning.Operation.metadata] + will be + `WorkflowMetadata `__. + Also see `Using + WorkflowMetadata `__. + + On successful completion, + [Operation.response][google.longrunning.Operation.response] will + be [Empty][google.protobuf.Empty]. + + Returns: + Callable[[~.InstantiateInlineWorkflowTemplateRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "instantiate_inline_workflow_template" not in self._stubs: + self._stubs[ + "instantiate_inline_workflow_template" + ] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/InstantiateInlineWorkflowTemplate", + request_serializer=workflow_templates.InstantiateInlineWorkflowTemplateRequest.serialize, + response_deserializer=operations.Operation.FromString, + ) + return self._stubs["instantiate_inline_workflow_template"] + + @property + def update_workflow_template( + self, + ) -> Callable[ + [workflow_templates.UpdateWorkflowTemplateRequest], + Awaitable[workflow_templates.WorkflowTemplate], + ]: + r"""Return a callable for the update workflow template method over gRPC. + + Updates (replaces) workflow template. The updated + template must contain version that matches the current + server version. + + Returns: + Callable[[~.UpdateWorkflowTemplateRequest], + Awaitable[~.WorkflowTemplate]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "update_workflow_template" not in self._stubs: + self._stubs["update_workflow_template"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/UpdateWorkflowTemplate", + request_serializer=workflow_templates.UpdateWorkflowTemplateRequest.serialize, + response_deserializer=workflow_templates.WorkflowTemplate.deserialize, + ) + return self._stubs["update_workflow_template"] + + @property + def list_workflow_templates( + self, + ) -> Callable[ + [workflow_templates.ListWorkflowTemplatesRequest], + Awaitable[workflow_templates.ListWorkflowTemplatesResponse], + ]: + r"""Return a callable for the list workflow templates method over gRPC. + + Lists workflows that match the specified filter in + the request. + + Returns: + Callable[[~.ListWorkflowTemplatesRequest], + Awaitable[~.ListWorkflowTemplatesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_workflow_templates" not in self._stubs: + self._stubs["list_workflow_templates"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/ListWorkflowTemplates", + request_serializer=workflow_templates.ListWorkflowTemplatesRequest.serialize, + response_deserializer=workflow_templates.ListWorkflowTemplatesResponse.deserialize, + ) + return self._stubs["list_workflow_templates"] + + @property + def delete_workflow_template( + self, + ) -> Callable[ + [workflow_templates.DeleteWorkflowTemplateRequest], Awaitable[empty.Empty] + ]: + r"""Return a callable for the delete workflow template method over gRPC. + + Deletes a workflow template. It does not cancel in- + rogress workflows. + + Returns: + Callable[[~.DeleteWorkflowTemplateRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_workflow_template" not in self._stubs: + self._stubs["delete_workflow_template"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/DeleteWorkflowTemplate", + request_serializer=workflow_templates.DeleteWorkflowTemplateRequest.serialize, + response_deserializer=empty.Empty.FromString, + ) + return self._stubs["delete_workflow_template"] + + +__all__ = ("WorkflowTemplateServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/dataproc_v1beta2/types.py b/google/cloud/dataproc_v1beta2/types.py deleted file mode 100644 index 23d3f87e..00000000 --- a/google/cloud/dataproc_v1beta2/types.py +++ /dev/null @@ -1,68 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -from __future__ import absolute_import -import sys - -from google.api_core.protobuf_helpers import get_messages - -from google.cloud.dataproc_v1beta2.proto import autoscaling_policies_pb2 -from google.cloud.dataproc_v1beta2.proto import clusters_pb2 -from google.cloud.dataproc_v1beta2.proto import jobs_pb2 -from google.cloud.dataproc_v1beta2.proto import operations_pb2 as proto_operations_pb2 -from google.cloud.dataproc_v1beta2.proto import workflow_templates_pb2 -from google.longrunning import operations_pb2 as longrunning_operations_pb2 -from google.protobuf import any_pb2 -from google.protobuf import duration_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 -from google.protobuf import timestamp_pb2 -from google.rpc import status_pb2 - - -_shared_modules = [ - longrunning_operations_pb2, - any_pb2, - duration_pb2, - empty_pb2, - field_mask_pb2, - timestamp_pb2, - status_pb2, -] - -_local_modules = [ - autoscaling_policies_pb2, - clusters_pb2, - jobs_pb2, - proto_operations_pb2, - workflow_templates_pb2, -] - -names = [] - -for module in _shared_modules: # pragma: NO COVER - for name, message in get_messages(module).items(): - setattr(sys.modules[__name__], name, message) - names.append(name) -for module in _local_modules: - for name, message in get_messages(module).items(): - message.__module__ = "google.cloud.dataproc_v1beta2.types" - setattr(sys.modules[__name__], name, message) - names.append(name) - - -__all__ = tuple(sorted(names)) diff --git a/google/cloud/dataproc_v1beta2/types/__init__.py b/google/cloud/dataproc_v1beta2/types/__init__.py new file mode 100644 index 00000000..9aa20055 --- /dev/null +++ b/google/cloud/dataproc_v1beta2/types/__init__.py @@ -0,0 +1,199 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
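[Editor's note, not part of the patch] The hand-written ``types.py`` shim deleted above is replaced by the ``types/`` package introduced below, whose message classes are proto-plus wrappers. A small sketch of the new import surface; the parent and page size are illustrative values::

    # Hedged sketch: the identifiers come from the manifest that follows.
    from google.cloud.dataproc_v1beta2 import types

    request = types.ListWorkflowTemplatesRequest(
        parent="projects/my-project/regions/us-central1",
        page_size=50,
    )

    # proto-plus messages support keyword construction and attribute access,
    # and can be unwrapped to the underlying protobuf message when needed.
    assert request.page_size == 50
    raw_pb = types.ListWorkflowTemplatesRequest.pb(request)

The ``.serialize`` / ``.deserialize`` helpers referenced by the transport stubs earlier in this patch are generated from these same proto-plus definitions.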
+# + +from .autoscaling_policies import ( + AutoscalingPolicy, + BasicAutoscalingAlgorithm, + BasicYarnAutoscalingConfig, + InstanceGroupAutoscalingPolicyConfig, + CreateAutoscalingPolicyRequest, + GetAutoscalingPolicyRequest, + UpdateAutoscalingPolicyRequest, + DeleteAutoscalingPolicyRequest, + ListAutoscalingPoliciesRequest, + ListAutoscalingPoliciesResponse, +) +from .clusters import ( + Cluster, + ClusterConfig, + GkeClusterConfig, + EndpointConfig, + AutoscalingConfig, + EncryptionConfig, + GceClusterConfig, + InstanceGroupConfig, + ManagedGroupConfig, + AcceleratorConfig, + DiskConfig, + LifecycleConfig, + SecurityConfig, + KerberosConfig, + NodeInitializationAction, + ClusterStatus, + SoftwareConfig, + ClusterMetrics, + CreateClusterRequest, + UpdateClusterRequest, + DeleteClusterRequest, + GetClusterRequest, + ListClustersRequest, + ListClustersResponse, + DiagnoseClusterRequest, + DiagnoseClusterResults, + ReservationAffinity, +) +from .jobs import ( + LoggingConfig, + HadoopJob, + SparkJob, + PySparkJob, + QueryList, + HiveJob, + SparkSqlJob, + PigJob, + SparkRJob, + PrestoJob, + JobPlacement, + JobStatus, + JobReference, + YarnApplication, + Job, + JobScheduling, + JobMetadata, + SubmitJobRequest, + GetJobRequest, + ListJobsRequest, + UpdateJobRequest, + ListJobsResponse, + CancelJobRequest, + DeleteJobRequest, +) +from .operations import ( + ClusterOperationStatus, + ClusterOperationMetadata, +) +from .workflow_templates import ( + WorkflowTemplate, + WorkflowTemplatePlacement, + ManagedCluster, + ClusterSelector, + OrderedJob, + TemplateParameter, + ParameterValidation, + RegexValidation, + ValueValidation, + WorkflowMetadata, + ClusterOperation, + WorkflowGraph, + WorkflowNode, + CreateWorkflowTemplateRequest, + GetWorkflowTemplateRequest, + InstantiateWorkflowTemplateRequest, + InstantiateInlineWorkflowTemplateRequest, + UpdateWorkflowTemplateRequest, + ListWorkflowTemplatesRequest, + ListWorkflowTemplatesResponse, + DeleteWorkflowTemplateRequest, +) + + +__all__ = ( + "AutoscalingPolicy", + "BasicAutoscalingAlgorithm", + "BasicYarnAutoscalingConfig", + "InstanceGroupAutoscalingPolicyConfig", + "CreateAutoscalingPolicyRequest", + "GetAutoscalingPolicyRequest", + "UpdateAutoscalingPolicyRequest", + "DeleteAutoscalingPolicyRequest", + "ListAutoscalingPoliciesRequest", + "ListAutoscalingPoliciesResponse", + "Cluster", + "ClusterConfig", + "GkeClusterConfig", + "EndpointConfig", + "AutoscalingConfig", + "EncryptionConfig", + "GceClusterConfig", + "InstanceGroupConfig", + "ManagedGroupConfig", + "AcceleratorConfig", + "DiskConfig", + "LifecycleConfig", + "SecurityConfig", + "KerberosConfig", + "NodeInitializationAction", + "ClusterStatus", + "SoftwareConfig", + "ClusterMetrics", + "CreateClusterRequest", + "UpdateClusterRequest", + "DeleteClusterRequest", + "GetClusterRequest", + "ListClustersRequest", + "ListClustersResponse", + "DiagnoseClusterRequest", + "DiagnoseClusterResults", + "ReservationAffinity", + "LoggingConfig", + "HadoopJob", + "SparkJob", + "PySparkJob", + "QueryList", + "HiveJob", + "SparkSqlJob", + "PigJob", + "SparkRJob", + "PrestoJob", + "JobPlacement", + "JobStatus", + "JobReference", + "YarnApplication", + "Job", + "JobScheduling", + "JobMetadata", + "SubmitJobRequest", + "GetJobRequest", + "ListJobsRequest", + "UpdateJobRequest", + "ListJobsResponse", + "CancelJobRequest", + "DeleteJobRequest", + "ClusterOperationStatus", + "ClusterOperationMetadata", + "WorkflowTemplate", + "WorkflowTemplatePlacement", + "ManagedCluster", + "ClusterSelector", + 
"OrderedJob", + "TemplateParameter", + "ParameterValidation", + "RegexValidation", + "ValueValidation", + "WorkflowMetadata", + "ClusterOperation", + "WorkflowGraph", + "WorkflowNode", + "CreateWorkflowTemplateRequest", + "GetWorkflowTemplateRequest", + "InstantiateWorkflowTemplateRequest", + "InstantiateInlineWorkflowTemplateRequest", + "UpdateWorkflowTemplateRequest", + "ListWorkflowTemplatesRequest", + "ListWorkflowTemplatesResponse", + "DeleteWorkflowTemplateRequest", +) diff --git a/google/cloud/dataproc_v1beta2/types/autoscaling_policies.py b/google/cloud/dataproc_v1beta2/types/autoscaling_policies.py new file mode 100644 index 00000000..453f4954 --- /dev/null +++ b/google/cloud/dataproc_v1beta2/types/autoscaling_policies.py @@ -0,0 +1,360 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import duration_pb2 as duration # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.dataproc.v1beta2", + manifest={ + "AutoscalingPolicy", + "BasicAutoscalingAlgorithm", + "BasicYarnAutoscalingConfig", + "InstanceGroupAutoscalingPolicyConfig", + "CreateAutoscalingPolicyRequest", + "GetAutoscalingPolicyRequest", + "UpdateAutoscalingPolicyRequest", + "DeleteAutoscalingPolicyRequest", + "ListAutoscalingPoliciesRequest", + "ListAutoscalingPoliciesResponse", + }, +) + + +class AutoscalingPolicy(proto.Message): + r"""Describes an autoscaling policy for Dataproc cluster + autoscaler. + + Attributes: + id (str): + Required. The policy id. + + The id must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). Cannot begin or end with + underscore or hyphen. Must consist of between 3 and 50 + characters. + name (str): + Output only. The "resource name" of the autoscaling policy, + as described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies``, the + resource name of the policy has the following format: + ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` + + - For ``projects.locations.autoscalingPolicies``, the + resource name of the policy has the following format: + ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + basic_algorithm (~.autoscaling_policies.BasicAutoscalingAlgorithm): + + worker_config (~.autoscaling_policies.InstanceGroupAutoscalingPolicyConfig): + Required. Describes how the autoscaler will + operate for primary workers. + secondary_worker_config (~.autoscaling_policies.InstanceGroupAutoscalingPolicyConfig): + Optional. Describes how the autoscaler will + operate for secondary workers. 
+ """ + + id = proto.Field(proto.STRING, number=1) + + name = proto.Field(proto.STRING, number=2) + + basic_algorithm = proto.Field( + proto.MESSAGE, number=3, oneof="algorithm", message="BasicAutoscalingAlgorithm", + ) + + worker_config = proto.Field( + proto.MESSAGE, number=4, message="InstanceGroupAutoscalingPolicyConfig", + ) + + secondary_worker_config = proto.Field( + proto.MESSAGE, number=5, message="InstanceGroupAutoscalingPolicyConfig", + ) + + +class BasicAutoscalingAlgorithm(proto.Message): + r"""Basic algorithm for autoscaling. + + Attributes: + yarn_config (~.autoscaling_policies.BasicYarnAutoscalingConfig): + Required. YARN autoscaling configuration. + cooldown_period (~.duration.Duration): + Optional. Duration between scaling events. A scaling period + starts after the update operation from the previous event + has completed. + + Bounds: [2m, 1d]. Default: 2m. + """ + + yarn_config = proto.Field( + proto.MESSAGE, number=1, message="BasicYarnAutoscalingConfig", + ) + + cooldown_period = proto.Field(proto.MESSAGE, number=2, message=duration.Duration,) + + +class BasicYarnAutoscalingConfig(proto.Message): + r"""Basic autoscaling configurations for YARN. + + Attributes: + graceful_decommission_timeout (~.duration.Duration): + Required. Timeout for YARN graceful decommissioning of Node + Managers. Specifies the duration to wait for jobs to + complete before forcefully removing workers (and potentially + interrupting jobs). Only applicable to downscaling + operations. + + Bounds: [0s, 1d]. + scale_up_factor (float): + Required. Fraction of average pending memory in the last + cooldown period for which to add workers. A scale-up factor + of 1.0 will result in scaling up so that there is no pending + memory remaining after the update (more aggressive scaling). + A scale-up factor closer to 0 will result in a smaller + magnitude of scaling up (less aggressive scaling). + + Bounds: [0.0, 1.0]. + scale_down_factor (float): + Required. Fraction of average pending memory in the last + cooldown period for which to remove workers. A scale-down + factor of 1 will result in scaling down so that there is no + available memory remaining after the update (more aggressive + scaling). A scale-down factor of 0 disables removing + workers, which can be beneficial for autoscaling a single + job. + + Bounds: [0.0, 1.0]. + scale_up_min_worker_fraction (float): + Optional. Minimum scale-up threshold as a fraction of total + cluster size before scaling occurs. For example, in a + 20-worker cluster, a threshold of 0.1 means the autoscaler + must recommend at least a 2-worker scale-up for the cluster + to scale. A threshold of 0 means the autoscaler will scale + up on any recommended change. + + Bounds: [0.0, 1.0]. Default: 0.0. + scale_down_min_worker_fraction (float): + Optional. Minimum scale-down threshold as a fraction of + total cluster size before scaling occurs. For example, in a + 20-worker cluster, a threshold of 0.1 means the autoscaler + must recommend at least a 2 worker scale-down for the + cluster to scale. A threshold of 0 means the autoscaler will + scale down on any recommended change. + + Bounds: [0.0, 1.0]. Default: 0.0. 
+ """ + + graceful_decommission_timeout = proto.Field( + proto.MESSAGE, number=5, message=duration.Duration, + ) + + scale_up_factor = proto.Field(proto.DOUBLE, number=1) + + scale_down_factor = proto.Field(proto.DOUBLE, number=2) + + scale_up_min_worker_fraction = proto.Field(proto.DOUBLE, number=3) + + scale_down_min_worker_fraction = proto.Field(proto.DOUBLE, number=4) + + +class InstanceGroupAutoscalingPolicyConfig(proto.Message): + r"""Configuration for the size bounds of an instance group, + including its proportional size to other groups. + + Attributes: + min_instances (int): + Optional. Minimum number of instances for this group. + + Primary workers - Bounds: [2, max_instances]. Default: 2. + Secondary workers - Bounds: [0, max_instances]. Default: 0. + max_instances (int): + Optional. Maximum number of instances for this group. + Required for primary workers. Note that by default, clusters + will not use secondary workers. Required for secondary + workers if the minimum secondary instances is set. + + Primary workers - Bounds: [min_instances, ). Required. + Secondary workers - Bounds: [min_instances, ). Default: 0. + weight (int): + Optional. Weight for the instance group, which is used to + determine the fraction of total workers in the cluster from + this instance group. For example, if primary workers have + weight 2, and secondary workers have weight 1, the cluster + will have approximately 2 primary workers for each secondary + worker. + + The cluster may not reach the specified balance if + constrained by min/max bounds or other autoscaling settings. + For example, if ``max_instances`` for secondary workers is + 0, then only primary workers will be added. The cluster can + also be out of balance when created. + + If weight is not set on any instance group, the cluster will + default to equal weight for all groups: the cluster will + attempt to maintain an equal number of workers in each group + within the configured size bounds for each group. If weight + is set for one group only, the cluster will default to zero + weight on the unset group. For example if weight is set only + on primary workers, the cluster will use primary workers + only and no secondary workers. + """ + + min_instances = proto.Field(proto.INT32, number=1) + + max_instances = proto.Field(proto.INT32, number=2) + + weight = proto.Field(proto.INT32, number=3) + + +class CreateAutoscalingPolicyRequest(proto.Message): + r"""A request to create an autoscaling policy. + + Attributes: + parent (str): + Required. The "resource name" of the region or location, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.create``, the + resource name has the following format: + ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.autoscalingPolicies.create``, + the resource name has the following format: + ``projects/{project_id}/locations/{location}`` + policy (~.autoscaling_policies.AutoscalingPolicy): + Required. The autoscaling policy to create. + """ + + parent = proto.Field(proto.STRING, number=1) + + policy = proto.Field(proto.MESSAGE, number=2, message=AutoscalingPolicy,) + + +class GetAutoscalingPolicyRequest(proto.Message): + r"""A request to fetch an autoscaling policy. + + Attributes: + name (str): + Required. The "resource name" of the autoscaling policy, as + described in + https://cloud.google.com/apis/design/resource_names. 
+ + - For ``projects.regions.autoscalingPolicies.get``, the + resource name of the policy has the following format: + ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` + + - For ``projects.locations.autoscalingPolicies.get``, the + resource name of the policy has the following format: + ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + """ + + name = proto.Field(proto.STRING, number=1) + + +class UpdateAutoscalingPolicyRequest(proto.Message): + r"""A request to update an autoscaling policy. + + Attributes: + policy (~.autoscaling_policies.AutoscalingPolicy): + Required. The updated autoscaling policy. + """ + + policy = proto.Field(proto.MESSAGE, number=1, message=AutoscalingPolicy,) + + +class DeleteAutoscalingPolicyRequest(proto.Message): + r"""A request to delete an autoscaling policy. + Autoscaling policies in use by one or more clusters will not be + deleted. + + Attributes: + name (str): + Required. The "resource name" of the autoscaling policy, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.delete``, the + resource name of the policy has the following format: + ``projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`` + + - For ``projects.locations.autoscalingPolicies.delete``, + the resource name of the policy has the following format: + ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` + """ + + name = proto.Field(proto.STRING, number=1) + + +class ListAutoscalingPoliciesRequest(proto.Message): + r"""A request to list autoscaling policies in a project. + + Attributes: + parent (str): + Required. The "resource name" of the region or location, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.autoscalingPolicies.list``, the + resource name of the region has the following format: + ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.autoscalingPolicies.list``, the + resource name of the location has the following format: + ``projects/{project_id}/locations/{location}`` + page_size (int): + Optional. The maximum number of results to + return in each response. Must be less than or + equal to 1000. Defaults to 100. + page_token (str): + Optional. The page token, returned by a + previous call, to request the next page of + results. + """ + + parent = proto.Field(proto.STRING, number=1) + + page_size = proto.Field(proto.INT32, number=2) + + page_token = proto.Field(proto.STRING, number=3) + + +class ListAutoscalingPoliciesResponse(proto.Message): + r"""A response to a request to list autoscaling policies in a + project. + + Attributes: + policies (Sequence[~.autoscaling_policies.AutoscalingPolicy]): + Output only. Autoscaling policies list. + next_page_token (str): + Output only. This token is included in the + response if there are more results to fetch. 
+ """ + + @property + def raw_page(self): + return self + + policies = proto.RepeatedField(proto.MESSAGE, number=1, message=AutoscalingPolicy,) + + next_page_token = proto.Field(proto.STRING, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/dataproc_v1beta2/types/clusters.py b/google/cloud/dataproc_v1beta2/types/clusters.py new file mode 100644 index 00000000..d2a3a450 --- /dev/null +++ b/google/cloud/dataproc_v1beta2/types/clusters.py @@ -0,0 +1,1255 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.dataproc_v1beta2.types import shared +from google.protobuf import duration_pb2 as duration # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.dataproc.v1beta2", + manifest={ + "Cluster", + "ClusterConfig", + "GkeClusterConfig", + "EndpointConfig", + "AutoscalingConfig", + "EncryptionConfig", + "GceClusterConfig", + "InstanceGroupConfig", + "ManagedGroupConfig", + "AcceleratorConfig", + "DiskConfig", + "LifecycleConfig", + "SecurityConfig", + "KerberosConfig", + "NodeInitializationAction", + "ClusterStatus", + "SoftwareConfig", + "ClusterMetrics", + "CreateClusterRequest", + "UpdateClusterRequest", + "DeleteClusterRequest", + "GetClusterRequest", + "ListClustersRequest", + "ListClustersResponse", + "DiagnoseClusterRequest", + "DiagnoseClusterResults", + "ReservationAffinity", + }, +) + + +class Cluster(proto.Message): + r"""Describes the identifying information, config, and status of + a cluster of Compute Engine instances. + + Attributes: + project_id (str): + Required. The Google Cloud Platform project + ID that the cluster belongs to. + cluster_name (str): + Required. The cluster name. Cluster names + within a project must be unique. Names of + deleted clusters can be reused. + config (~.gcd_clusters.ClusterConfig): + Required. The cluster config. Note that + Dataproc may set default values, and values may + change when clusters are updated. + labels (Sequence[~.gcd_clusters.Cluster.LabelsEntry]): + Optional. The labels to associate with this cluster. Label + **keys** must contain 1 to 63 characters, and must conform + to `RFC 1035 `__. + Label **values** may be empty, but, if present, must contain + 1 to 63 characters, and must conform to `RFC + 1035 `__. No more than + 32 labels can be associated with a cluster. + status (~.gcd_clusters.ClusterStatus): + Output only. Cluster status. + status_history (Sequence[~.gcd_clusters.ClusterStatus]): + Output only. The previous cluster status. + cluster_uuid (str): + Output only. A cluster UUID (Unique Universal + Identifier). Dataproc generates this value when + it creates the cluster. + metrics (~.gcd_clusters.ClusterMetrics): + Output only. Contains cluster daemon metrics such as HDFS + and YARN stats. 
+ + **Beta Feature**: This report is available for testing + purposes only. It may be changed before final release. + """ + + project_id = proto.Field(proto.STRING, number=1) + + cluster_name = proto.Field(proto.STRING, number=2) + + config = proto.Field(proto.MESSAGE, number=3, message="ClusterConfig",) + + labels = proto.MapField(proto.STRING, proto.STRING, number=8) + + status = proto.Field(proto.MESSAGE, number=4, message="ClusterStatus",) + + status_history = proto.RepeatedField( + proto.MESSAGE, number=7, message="ClusterStatus", + ) + + cluster_uuid = proto.Field(proto.STRING, number=6) + + metrics = proto.Field(proto.MESSAGE, number=9, message="ClusterMetrics",) + + +class ClusterConfig(proto.Message): + r"""The cluster config. + + Attributes: + config_bucket (str): + Optional. A Cloud Storage bucket used to stage job + dependencies, config files, and job driver console output. + If you do not specify a staging bucket, Cloud Dataproc will + determine a Cloud Storage location (US, ASIA, or EU) for + your cluster's staging bucket according to the Compute + Engine zone where your cluster is deployed, and then create + and manage this project-level, per-location bucket (see + `Dataproc staging + bucket `__). + gce_cluster_config (~.gcd_clusters.GceClusterConfig): + Optional. The shared Compute Engine config + settings for all instances in a cluster. + master_config (~.gcd_clusters.InstanceGroupConfig): + Optional. The Compute Engine config settings + for the master instance in a cluster. + worker_config (~.gcd_clusters.InstanceGroupConfig): + Optional. The Compute Engine config settings + for worker instances in a cluster. + secondary_worker_config (~.gcd_clusters.InstanceGroupConfig): + Optional. The Compute Engine config settings + for additional worker instances in a cluster. + software_config (~.gcd_clusters.SoftwareConfig): + Optional. The config settings for software + inside the cluster. + lifecycle_config (~.gcd_clusters.LifecycleConfig): + Optional. The config setting for auto delete + cluster schedule. + initialization_actions (Sequence[~.gcd_clusters.NodeInitializationAction]): + Optional. Commands to execute on each node after config is + completed. By default, executables are run on master and all + worker nodes. You can test a node's role metadata to run an + executable on a master or worker node, as shown below using + ``curl`` (you can also use ``wget``): + + :: + + ROLE=$(curl -H Metadata-Flavor:Google + http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + if [[ "${ROLE}" == 'Master' ]]; then + ... master specific actions ... + else + ... worker specific actions ... + fi + encryption_config (~.gcd_clusters.EncryptionConfig): + Optional. Encryption settings for the + cluster. + autoscaling_config (~.gcd_clusters.AutoscalingConfig): + Optional. Autoscaling config for the policy + associated with the cluster. Cluster does not + autoscale if this field is unset. + endpoint_config (~.gcd_clusters.EndpointConfig): + Optional. Port/endpoint configuration for + this cluster + security_config (~.gcd_clusters.SecurityConfig): + Optional. Security related configuration. + gke_cluster_config (~.gcd_clusters.GkeClusterConfig): + Optional. The Kubernetes Engine config for Dataproc clusters + deployed to Kubernetes. Setting this is considered mutually + exclusive with Compute Engine-based options such as + ``gce_cluster_config``, ``master_config``, + ``worker_config``, ``secondary_worker_config``, and + ``autoscaling_config``. 
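+
+        A minimal construction sketch (illustrative only; the bucket,
+        project, and zone names are placeholders, and Dataproc fills in
+        defaults for anything left unset)::
+
+            from google.cloud.dataproc_v1beta2.types import clusters
+
+            config = clusters.ClusterConfig(
+                config_bucket="my-staging-bucket",
+                gce_cluster_config=clusters.GceClusterConfig(
+                    zone_uri="us-central1-f",
+                ),
+                master_config=clusters.InstanceGroupConfig(num_instances=1),
+                worker_config=clusters.InstanceGroupConfig(num_instances=2),
+            )
+
+            # The config is embedded in a Cluster before calling CreateCluster.
+            cluster = clusters.Cluster(
+                project_id="my-project",
+                cluster_name="example-cluster",
+                config=config,
+            )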
+ """ + + config_bucket = proto.Field(proto.STRING, number=1) + + gce_cluster_config = proto.Field( + proto.MESSAGE, number=8, message="GceClusterConfig", + ) + + master_config = proto.Field(proto.MESSAGE, number=9, message="InstanceGroupConfig",) + + worker_config = proto.Field( + proto.MESSAGE, number=10, message="InstanceGroupConfig", + ) + + secondary_worker_config = proto.Field( + proto.MESSAGE, number=12, message="InstanceGroupConfig", + ) + + software_config = proto.Field(proto.MESSAGE, number=13, message="SoftwareConfig",) + + lifecycle_config = proto.Field(proto.MESSAGE, number=14, message="LifecycleConfig",) + + initialization_actions = proto.RepeatedField( + proto.MESSAGE, number=11, message="NodeInitializationAction", + ) + + encryption_config = proto.Field( + proto.MESSAGE, number=15, message="EncryptionConfig", + ) + + autoscaling_config = proto.Field( + proto.MESSAGE, number=16, message="AutoscalingConfig", + ) + + endpoint_config = proto.Field(proto.MESSAGE, number=17, message="EndpointConfig",) + + security_config = proto.Field(proto.MESSAGE, number=18, message="SecurityConfig",) + + gke_cluster_config = proto.Field( + proto.MESSAGE, number=19, message="GkeClusterConfig", + ) + + +class GkeClusterConfig(proto.Message): + r"""The GKE config for this cluster. + + Attributes: + namespaced_gke_deployment_target (~.gcd_clusters.GkeClusterConfig.NamespacedGkeDeploymentTarget): + Optional. A target for the deployment. + """ + + class NamespacedGkeDeploymentTarget(proto.Message): + r"""A full, namespace-isolated deployment target for an existing + GKE cluster. + + Attributes: + target_gke_cluster (str): + Optional. The target GKE cluster to deploy to. Format: + 'projects/{project}/locations/{location}/clusters/{cluster_id}' + cluster_namespace (str): + Optional. A namespace within the GKE cluster + to deploy into. + """ + + target_gke_cluster = proto.Field(proto.STRING, number=1) + + cluster_namespace = proto.Field(proto.STRING, number=2) + + namespaced_gke_deployment_target = proto.Field( + proto.MESSAGE, number=1, message=NamespacedGkeDeploymentTarget, + ) + + +class EndpointConfig(proto.Message): + r"""Endpoint config for this cluster + + Attributes: + http_ports (Sequence[~.gcd_clusters.EndpointConfig.HttpPortsEntry]): + Output only. The map of port descriptions to URLs. Will only + be populated if enable_http_port_access is true. + enable_http_port_access (bool): + Optional. If true, enable http access to + specific ports on the cluster from external + sources. Defaults to false. + """ + + http_ports = proto.MapField(proto.STRING, proto.STRING, number=1) + + enable_http_port_access = proto.Field(proto.BOOL, number=2) + + +class AutoscalingConfig(proto.Message): + r"""Autoscaling Policy config associated with the cluster. + + Attributes: + policy_uri (str): + Optional. The autoscaling policy used by the cluster. + + Only resource names including projectid and location + (region) are valid. Examples: + + - ``https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`` + - ``projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`` + + Note that the policy must be in the same project and + Dataproc region. + """ + + policy_uri = proto.Field(proto.STRING, number=1) + + +class EncryptionConfig(proto.Message): + r"""Encryption settings for the cluster. + + Attributes: + gce_pd_kms_key_name (str): + Optional. 
The Cloud KMS key name to use for + PD disk encryption for all instances in the + cluster. + """ + + gce_pd_kms_key_name = proto.Field(proto.STRING, number=1) + + +class GceClusterConfig(proto.Message): + r"""Common config settings for resources of Compute Engine + cluster instances, applicable to all instances in the cluster. + + Attributes: + zone_uri (str): + Optional. The zone where the Compute Engine cluster will be + located. On a create request, it is required in the "global" + region. If omitted in a non-global Dataproc region, the + service will pick a zone in the corresponding Compute Engine + region. On a get request, zone will always be present. + + A full URL, partial URI, or short name are valid. Examples: + + - ``https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`` + - ``projects/[project_id]/zones/[zone]`` + - ``us-central1-f`` + network_uri (str): + Optional. The Compute Engine network to be used for machine + communications. Cannot be specified with subnetwork_uri. If + neither ``network_uri`` nor ``subnetwork_uri`` is specified, + the "default" network of the project is used, if it exists. + Cannot be a "Custom Subnet Network" (see `Using + Subnetworks `__ + for more information). + + A full URL, partial URI, or short name are valid. Examples: + + - ``https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`` + - ``projects/[project_id]/regions/global/default`` + - ``default`` + subnetwork_uri (str): + Optional. The Compute Engine subnetwork to be used for + machine communications. Cannot be specified with + network_uri. + + A full URL, partial URI, or short name are valid. Examples: + + - ``https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/subnetworks/sub0`` + - ``projects/[project_id]/regions/us-east1/subnetworks/sub0`` + - ``sub0`` + internal_ip_only (bool): + Optional. If true, all instances in the cluster will only + have internal IP addresses. By default, clusters are not + restricted to internal IP addresses, and will have ephemeral + external IP addresses assigned to each instance. This + ``internal_ip_only`` restriction can only be enabled for + subnetwork enabled networks, and all off-cluster + dependencies must be configured to be accessible without + external IP addresses. + service_account (str): + Optional. The `Dataproc service + account `__ + (also see `VM Data Plane + identity `__) + used by Dataproc cluster VM instances to access Google Cloud + Platform services. + + If not specified, the `Compute Engine default service + account `__ + is used. + service_account_scopes (Sequence[str]): + Optional. The URIs of service account scopes to be included + in Compute Engine instances. The following base set of + scopes is always included: + + - https://www.googleapis.com/auth/cloud.useraccounts.readonly + - https://www.googleapis.com/auth/devstorage.read_write + - https://www.googleapis.com/auth/logging.write + + If no scopes are specified, the following defaults are also + provided: + + - https://www.googleapis.com/auth/bigquery + - https://www.googleapis.com/auth/bigtable.admin.table + - https://www.googleapis.com/auth/bigtable.data + - https://www.googleapis.com/auth/devstorage.full_control + tags (Sequence[str]): + The Compute Engine tags to add to all instances (see + `Tagging + instances `__). + metadata (Sequence[~.gcd_clusters.GceClusterConfig.MetadataEntry]): + The Compute Engine metadata entries to add to all instances + (see `Project and instance + metadata `__). 
+ reservation_affinity (~.gcd_clusters.ReservationAffinity): + Optional. Reservation Affinity for consuming + Zonal reservation. + """ + + zone_uri = proto.Field(proto.STRING, number=1) + + network_uri = proto.Field(proto.STRING, number=2) + + subnetwork_uri = proto.Field(proto.STRING, number=6) + + internal_ip_only = proto.Field(proto.BOOL, number=7) + + service_account = proto.Field(proto.STRING, number=8) + + service_account_scopes = proto.RepeatedField(proto.STRING, number=3) + + tags = proto.RepeatedField(proto.STRING, number=4) + + metadata = proto.MapField(proto.STRING, proto.STRING, number=5) + + reservation_affinity = proto.Field( + proto.MESSAGE, number=11, message="ReservationAffinity", + ) + + +class InstanceGroupConfig(proto.Message): + r"""The config settings for Compute Engine resources in + an instance group, such as a master or worker group. + + Attributes: + num_instances (int): + Optional. The number of VM instances in the + instance group. For master instance groups, must + be set to 1. + instance_names (Sequence[str]): + Output only. The list of instance names. Dataproc derives + the names from ``cluster_name``, ``num_instances``, and the + instance group. + image_uri (str): + Optional. The Compute Engine image resource used for cluster + instances. + + The URI can represent an image or image family. + + Image examples: + + - ``https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]`` + - ``projects/[project_id]/global/images/[image-id]`` + - ``image-id`` + + Image family examples. Dataproc will use the most recent + image from the family: + + - ``https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]`` + - ``projects/[project_id]/global/images/family/[custom-image-family-name]`` + + If the URI is unspecified, it will be inferred from + ``SoftwareConfig.image_version`` or the system default. + machine_type_uri (str): + Optional. The Compute Engine machine type used for cluster + instances. + + A full URL, partial URI, or short name are valid. Examples: + + - ``https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`` + - ``projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`` + - ``n1-standard-2`` + + **Auto Zone Exception**: If you are using the Dataproc `Auto + Zone + Placement `__ + feature, you must use the short name of the machine type + resource, for example, ``n1-standard-2``. + disk_config (~.gcd_clusters.DiskConfig): + Optional. Disk option config settings. + is_preemptible (bool): + Output only. Specifies that this instance + group contains preemptible instances. + managed_group_config (~.gcd_clusters.ManagedGroupConfig): + Output only. The config for Compute Engine + Instance Group Manager that manages this group. + This is only used for preemptible instance + groups. + accelerators (Sequence[~.gcd_clusters.AcceleratorConfig]): + Optional. The Compute Engine accelerator + configuration for these instances. + min_cpu_platform (str): + Specifies the minimum cpu platform for the Instance Group. + See `Dataproc -> Minimum CPU + Platform `__. 
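+
+        A short sketch of a worker group (illustrative values only)::
+
+            from google.cloud.dataproc_v1beta2.types import clusters
+
+            workers = clusters.InstanceGroupConfig(
+                num_instances=2,
+                machine_type_uri="n1-standard-2",
+                disk_config=clusters.DiskConfig(
+                    boot_disk_type="pd-standard",
+                    boot_disk_size_gb=500,
+                ),
+            )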
+ """ + + num_instances = proto.Field(proto.INT32, number=1) + + instance_names = proto.RepeatedField(proto.STRING, number=2) + + image_uri = proto.Field(proto.STRING, number=3) + + machine_type_uri = proto.Field(proto.STRING, number=4) + + disk_config = proto.Field(proto.MESSAGE, number=5, message="DiskConfig",) + + is_preemptible = proto.Field(proto.BOOL, number=6) + + managed_group_config = proto.Field( + proto.MESSAGE, number=7, message="ManagedGroupConfig", + ) + + accelerators = proto.RepeatedField( + proto.MESSAGE, number=8, message="AcceleratorConfig", + ) + + min_cpu_platform = proto.Field(proto.STRING, number=9) + + +class ManagedGroupConfig(proto.Message): + r"""Specifies the resources used to actively manage an instance + group. + + Attributes: + instance_template_name (str): + Output only. The name of the Instance + Template used for the Managed Instance Group. + instance_group_manager_name (str): + Output only. The name of the Instance Group + Manager for this group. + """ + + instance_template_name = proto.Field(proto.STRING, number=1) + + instance_group_manager_name = proto.Field(proto.STRING, number=2) + + +class AcceleratorConfig(proto.Message): + r"""Specifies the type and number of accelerator cards attached to the + instances of an instance group (see `GPUs on Compute + Engine `__). + + Attributes: + accelerator_type_uri (str): + Full URL, partial URI, or short name of the accelerator type + resource to expose to this instance. See `Compute Engine + AcceleratorTypes `__ + + Examples + + - ``https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`` + - ``projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`` + - ``nvidia-tesla-k80`` + + **Auto Zone Exception**: If you are using the Dataproc `Auto + Zone + Placement `__ + feature, you must use the short name of the accelerator type + resource, for example, ``nvidia-tesla-k80``. + accelerator_count (int): + The number of the accelerator cards of this + type exposed to this instance. + """ + + accelerator_type_uri = proto.Field(proto.STRING, number=1) + + accelerator_count = proto.Field(proto.INT32, number=2) + + +class DiskConfig(proto.Message): + r"""Specifies the config of disk options for a group of VM + instances. + + Attributes: + boot_disk_type (str): + Optional. Type of the boot disk (default is + "pd-standard"). Valid values: "pd-ssd" + (Persistent Disk Solid State Drive) or "pd- + standard" (Persistent Disk Hard Disk Drive). + boot_disk_size_gb (int): + Optional. Size in GB of the boot disk + (default is 500GB). + num_local_ssds (int): + Number of attached SSDs, from 0 to 4 (default is 0). If SSDs + are not attached, the boot disk is used to store runtime + logs and + `HDFS `__ + data. If one or more SSDs are attached, this runtime bulk + data is spread across them, and the boot disk contains only + basic config and installed binaries. + """ + + boot_disk_type = proto.Field(proto.STRING, number=3) + + boot_disk_size_gb = proto.Field(proto.INT32, number=1) + + num_local_ssds = proto.Field(proto.INT32, number=2) + + +class LifecycleConfig(proto.Message): + r"""Specifies the cluster auto-delete schedule configuration. + + Attributes: + idle_delete_ttl (~.duration.Duration): + Optional. The duration to keep the cluster alive while + idling (when no jobs are running). Passing this threshold + will cause the cluster to be deleted. Minimum value is 10 + minutes; maximum value is 14 days (see JSON representation + of + `Duration `__. 
+ auto_delete_time (~.timestamp.Timestamp): + Optional. The time when cluster will be auto-deleted. (see + JSON representation of + `Timestamp `__). + auto_delete_ttl (~.duration.Duration): + Optional. The lifetime duration of cluster. The cluster will + be auto-deleted at the end of this period. Minimum value is + 10 minutes; maximum value is 14 days (see JSON + representation of + `Duration `__). + idle_start_time (~.timestamp.Timestamp): + Output only. The time when cluster became idle (most recent + job finished) and became eligible for deletion due to + idleness (see JSON representation of + `Timestamp `__). + """ + + idle_delete_ttl = proto.Field(proto.MESSAGE, number=1, message=duration.Duration,) + + auto_delete_time = proto.Field( + proto.MESSAGE, number=2, oneof="ttl", message=timestamp.Timestamp, + ) + + auto_delete_ttl = proto.Field( + proto.MESSAGE, number=3, oneof="ttl", message=duration.Duration, + ) + + idle_start_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,) + + +class SecurityConfig(proto.Message): + r"""Security related configuration, including encryption, + Kerberos, etc. + + Attributes: + kerberos_config (~.gcd_clusters.KerberosConfig): + Kerberos related configuration. + """ + + kerberos_config = proto.Field(proto.MESSAGE, number=1, message="KerberosConfig",) + + +class KerberosConfig(proto.Message): + r"""Specifies Kerberos related configuration. + + Attributes: + enable_kerberos (bool): + Optional. Flag to indicate whether to + Kerberize the cluster (default: false). Set this + field to true to enable Kerberos on a cluster. + root_principal_password_uri (str): + Required. The Cloud Storage URI of a KMS + encrypted file containing the root principal + password. + kms_key_uri (str): + Required. The uri of the KMS key used to + encrypt various sensitive files. + keystore_uri (str): + Optional. The Cloud Storage URI of the + keystore file used for SSL encryption. If not + provided, Dataproc will provide a self-signed + certificate. + truststore_uri (str): + Optional. The Cloud Storage URI of the + truststore file used for SSL encryption. If not + provided, Dataproc will provide a self-signed + certificate. + keystore_password_uri (str): + Optional. The Cloud Storage URI of a KMS + encrypted file containing the password to the + user provided keystore. For the self-signed + certificate, this password is generated by + Dataproc. + key_password_uri (str): + Optional. The Cloud Storage URI of a KMS + encrypted file containing the password to the + user provided key. For the self-signed + certificate, this password is generated by + Dataproc. + truststore_password_uri (str): + Optional. The Cloud Storage URI of a KMS + encrypted file containing the password to the + user provided truststore. For the self-signed + certificate, this password is generated by + Dataproc. + cross_realm_trust_realm (str): + Optional. The remote realm the Dataproc on- + luster KDC will trust, should the user enable + cross realm trust. + cross_realm_trust_kdc (str): + Optional. The KDC (IP or hostname) for the + remote trusted realm in a cross realm trust + relationship. + cross_realm_trust_admin_server (str): + Optional. The admin server (IP or hostname) + for the remote trusted realm in a cross realm + trust relationship. + cross_realm_trust_shared_password_uri (str): + Optional. The Cloud Storage URI of a KMS + encrypted file containing the shared password + between the on-cluster Kerberos realm and the + remote trusted realm, in a cross realm trust + relationship. 
+ kdc_db_key_uri (str): + Optional. The Cloud Storage URI of a KMS + encrypted file containing the master key of the + KDC database. + tgt_lifetime_hours (int): + Optional. The lifetime of the ticket granting + ticket, in hours. If not specified, or user + specifies 0, then default value 10 will be used. + realm (str): + Optional. The name of the on-cluster Kerberos + realm. If not specified, the uppercased domain + of hostnames will be the realm. + """ + + enable_kerberos = proto.Field(proto.BOOL, number=1) + + root_principal_password_uri = proto.Field(proto.STRING, number=2) + + kms_key_uri = proto.Field(proto.STRING, number=3) + + keystore_uri = proto.Field(proto.STRING, number=4) + + truststore_uri = proto.Field(proto.STRING, number=5) + + keystore_password_uri = proto.Field(proto.STRING, number=6) + + key_password_uri = proto.Field(proto.STRING, number=7) + + truststore_password_uri = proto.Field(proto.STRING, number=8) + + cross_realm_trust_realm = proto.Field(proto.STRING, number=9) + + cross_realm_trust_kdc = proto.Field(proto.STRING, number=10) + + cross_realm_trust_admin_server = proto.Field(proto.STRING, number=11) + + cross_realm_trust_shared_password_uri = proto.Field(proto.STRING, number=12) + + kdc_db_key_uri = proto.Field(proto.STRING, number=13) + + tgt_lifetime_hours = proto.Field(proto.INT32, number=14) + + realm = proto.Field(proto.STRING, number=15) + + +class NodeInitializationAction(proto.Message): + r"""Specifies an executable to run on a fully configured node and + a timeout period for executable completion. + + Attributes: + executable_file (str): + Required. Cloud Storage URI of executable + file. + execution_timeout (~.duration.Duration): + Optional. Amount of time executable has to complete. Default + is 10 minutes (see JSON representation of + `Duration `__). + + Cluster creation fails with an explanatory error message + (the name of the executable that caused the error and the + exceeded timeout period) if the executable is not completed + at end of the timeout period. + """ + + executable_file = proto.Field(proto.STRING, number=1) + + execution_timeout = proto.Field(proto.MESSAGE, number=2, message=duration.Duration,) + + +class ClusterStatus(proto.Message): + r"""The status of a cluster and its instances. + + Attributes: + state (~.gcd_clusters.ClusterStatus.State): + Output only. The cluster's state. + detail (str): + Output only. Optional details of cluster's + state. + state_start_time (~.timestamp.Timestamp): + Output only. Time when this state was entered (see JSON + representation of + `Timestamp `__). + substate (~.gcd_clusters.ClusterStatus.Substate): + Output only. Additional state information + that includes status reported by the agent. + """ + + class State(proto.Enum): + r"""The cluster state.""" + UNKNOWN = 0 + CREATING = 1 + RUNNING = 2 + ERROR = 3 + DELETING = 4 + UPDATING = 5 + STOPPING = 6 + STOPPED = 7 + STARTING = 8 + + class Substate(proto.Enum): + r"""The cluster substate.""" + UNSPECIFIED = 0 + UNHEALTHY = 1 + STALE_STATUS = 2 + + state = proto.Field(proto.ENUM, number=1, enum=State,) + + detail = proto.Field(proto.STRING, number=2) + + state_start_time = proto.Field( + proto.MESSAGE, number=3, message=timestamp.Timestamp, + ) + + substate = proto.Field(proto.ENUM, number=4, enum=Substate,) + + +class SoftwareConfig(proto.Message): + r"""Specifies the selection and config of software inside the + cluster. + + Attributes: + image_version (str): + Optional. The version of software inside the cluster. 
It + must be one of the supported `Dataproc + Versions `__, + such as "1.2" (including a subminor version, such as + "1.2.29"), or the `"preview" + version `__. + If unspecified, it defaults to the latest Debian version. + properties (Sequence[~.gcd_clusters.SoftwareConfig.PropertiesEntry]): + Optional. The properties to set on daemon config files. + + Property keys are specified in ``prefix:property`` format, + for example ``core:hadoop.tmp.dir``. The following are + supported prefixes and their mappings: + + - capacity-scheduler: ``capacity-scheduler.xml`` + - core: ``core-site.xml`` + - distcp: ``distcp-default.xml`` + - hdfs: ``hdfs-site.xml`` + - hive: ``hive-site.xml`` + - mapred: ``mapred-site.xml`` + - pig: ``pig.properties`` + - spark: ``spark-defaults.conf`` + - yarn: ``yarn-site.xml`` + + For more information, see `Cluster + properties `__. + optional_components (Sequence[~.shared.Component]): + The set of optional components to activate on + the cluster. + """ + + image_version = proto.Field(proto.STRING, number=1) + + properties = proto.MapField(proto.STRING, proto.STRING, number=2) + + optional_components = proto.RepeatedField( + proto.ENUM, number=3, enum=shared.Component, + ) + + +class ClusterMetrics(proto.Message): + r"""Contains cluster daemon metrics, such as HDFS and YARN stats. + + **Beta Feature**: This report is available for testing purposes + only. It may be changed before final release. + + Attributes: + hdfs_metrics (Sequence[~.gcd_clusters.ClusterMetrics.HdfsMetricsEntry]): + The HDFS metrics. + yarn_metrics (Sequence[~.gcd_clusters.ClusterMetrics.YarnMetricsEntry]): + The YARN metrics. + """ + + hdfs_metrics = proto.MapField(proto.STRING, proto.INT64, number=1) + + yarn_metrics = proto.MapField(proto.STRING, proto.INT64, number=2) + + +class CreateClusterRequest(proto.Message): + r"""A request to create a cluster. + + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the cluster belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + cluster (~.gcd_clusters.Cluster): + Required. The cluster to create. + request_id (str): + Optional. A unique id used to identify the request. If the + server receives two + [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest] + requests with the same id, then the second request will be + ignored and the first + [google.longrunning.Operation][google.longrunning.Operation] + created and stored in the backend is returned. + + It is recommended to always set this value to a + `UUID `__. + + The id must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). The maximum length is 40 + characters. + """ + + project_id = proto.Field(proto.STRING, number=1) + + region = proto.Field(proto.STRING, number=3) + + cluster = proto.Field(proto.MESSAGE, number=2, message=Cluster,) + + request_id = proto.Field(proto.STRING, number=4) + + +class UpdateClusterRequest(proto.Message): + r"""A request to update a cluster. + + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project the cluster belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + cluster_name (str): + Required. The cluster name. + cluster (~.gcd_clusters.Cluster): + Required. The changes to the cluster. + graceful_decommission_timeout (~.duration.Duration): + Optional. Timeout for graceful YARN decomissioning. 
Graceful + decommissioning allows removing nodes from the cluster + without interrupting jobs in progress. Timeout specifies how + long to wait for jobs in progress to finish before + forcefully removing nodes (and potentially interrupting + jobs). Default timeout is 0 (for forceful decommission), and + the maximum allowed timeout is 1 day (see JSON + representation of + `Duration `__). + + Only supported on Dataproc image versions 1.2 and higher. + update_mask (~.field_mask.FieldMask): + Required. Specifies the path, relative to ``Cluster``, of + the field to update. For example, to change the number of + workers in a cluster to 5, the ``update_mask`` parameter + would be specified as + ``config.worker_config.num_instances``, and the ``PATCH`` + request body would specify the new value, as follows: + + :: + + { + "config":{ + "workerConfig":{ + "numInstances":"5" + } + } + } + + Similarly, to change the number of preemptible workers in a + cluster to 5, the ``update_mask`` parameter would be + ``config.secondary_worker_config.num_instances``, and the + ``PATCH`` request body would be set as follows: + + :: + + { + "config":{ + "secondaryWorkerConfig":{ + "numInstances":"5" + } + } + } + + Note: currently only the following fields can be updated: + + .. raw:: html + + + + + + + + + + + + + + + + + + + + + + + + + + +
+             <table>
+             <tr><td><strong>Mask</strong></td><td><strong>Purpose</strong></td></tr>
+             <tr><td>labels</td><td>Updates labels</td></tr>
+             <tr><td>config.worker_config.num_instances</td><td>Resize primary worker group</td></tr>
+             <tr><td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td></tr>
+             <tr><td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td></tr>
+             <tr><td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td></tr>
+             <tr><td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td></tr>
+             <tr><td>config.autoscaling_config.policy_uri</td><td>Use, stop using, or change autoscaling policies</td></tr>
+             </table>
+ request_id (str): + Optional. A unique id used to identify the request. If the + server receives two + [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest] + requests with the same id, then the second request will be + ignored and the first + [google.longrunning.Operation][google.longrunning.Operation] + created and stored in the backend is returned. + + It is recommended to always set this value to a + `UUID `__. + + The id must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). The maximum length is 40 + characters. + """ + + project_id = proto.Field(proto.STRING, number=1) + + region = proto.Field(proto.STRING, number=5) + + cluster_name = proto.Field(proto.STRING, number=2) + + cluster = proto.Field(proto.MESSAGE, number=3, message=Cluster,) + + graceful_decommission_timeout = proto.Field( + proto.MESSAGE, number=6, message=duration.Duration, + ) + + update_mask = proto.Field(proto.MESSAGE, number=4, message=field_mask.FieldMask,) + + request_id = proto.Field(proto.STRING, number=7) + + +class DeleteClusterRequest(proto.Message): + r"""A request to delete a cluster. + + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the cluster belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + cluster_name (str): + Required. The cluster name. + cluster_uuid (str): + Optional. Specifying the ``cluster_uuid`` means the RPC + should fail (with error NOT_FOUND) if cluster with specified + UUID does not exist. + request_id (str): + Optional. A unique id used to identify the request. If the + server receives two + [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest] + requests with the same id, then the second request will be + ignored and the first + [google.longrunning.Operation][google.longrunning.Operation] + created and stored in the backend is returned. + + It is recommended to always set this value to a + `UUID `__. + + The id must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). The maximum length is 40 + characters. + """ + + project_id = proto.Field(proto.STRING, number=1) + + region = proto.Field(proto.STRING, number=3) + + cluster_name = proto.Field(proto.STRING, number=2) + + cluster_uuid = proto.Field(proto.STRING, number=4) + + request_id = proto.Field(proto.STRING, number=5) + + +class GetClusterRequest(proto.Message): + r"""Request to get the resource representation for a cluster in a + project. + + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the cluster belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + cluster_name (str): + Required. The cluster name. + """ + + project_id = proto.Field(proto.STRING, number=1) + + region = proto.Field(proto.STRING, number=3) + + cluster_name = proto.Field(proto.STRING, number=2) + + +class ListClustersRequest(proto.Message): + r"""A request to list the clusters in a project. + + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the cluster belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + filter (str): + Optional. A filter constraining the clusters to list. + Filters are case-sensitive and have the following syntax: + + field = value [AND [field = value]] ... 
+ + where **field** is one of ``status.state``, ``clusterName``, + or ``labels.[KEY]``, and ``[KEY]`` is a label key. **value** + can be ``*`` to match all values. ``status.state`` can be + one of the following: ``ACTIVE``, ``INACTIVE``, + ``CREATING``, ``RUNNING``, ``ERROR``, ``DELETING``, or + ``UPDATING``. ``ACTIVE`` contains the ``CREATING``, + ``UPDATING``, and ``RUNNING`` states. ``INACTIVE`` contains + the ``DELETING`` and ``ERROR`` states. ``clusterName`` is + the name of the cluster provided at creation time. Only the + logical ``AND`` operator is supported; space-separated items + are treated as having an implicit ``AND`` operator. + + Example filter: + + status.state = ACTIVE AND clusterName = mycluster AND + labels.env = staging AND labels.starred = \* + page_size (int): + Optional. The standard List page size. + page_token (str): + Optional. The standard List page token. + """ + + project_id = proto.Field(proto.STRING, number=1) + + region = proto.Field(proto.STRING, number=4) + + filter = proto.Field(proto.STRING, number=5) + + page_size = proto.Field(proto.INT32, number=2) + + page_token = proto.Field(proto.STRING, number=3) + + +class ListClustersResponse(proto.Message): + r"""The list of all clusters in a project. + + Attributes: + clusters (Sequence[~.gcd_clusters.Cluster]): + Output only. The clusters in the project. + next_page_token (str): + Output only. This token is included in the response if there + are more results to fetch. To fetch additional results, + provide this value as the ``page_token`` in a subsequent + ListClustersRequest. + """ + + @property + def raw_page(self): + return self + + clusters = proto.RepeatedField(proto.MESSAGE, number=1, message=Cluster,) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class DiagnoseClusterRequest(proto.Message): + r"""A request to collect cluster diagnostic information. + + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the cluster belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + cluster_name (str): + Required. The cluster name. + """ + + project_id = proto.Field(proto.STRING, number=1) + + region = proto.Field(proto.STRING, number=3) + + cluster_name = proto.Field(proto.STRING, number=2) + + +class DiagnoseClusterResults(proto.Message): + r"""The location of diagnostic output. + + Attributes: + output_uri (str): + Output only. The Cloud Storage URI of the + diagnostic output. The output report is a plain + text file with a summary of collected + diagnostics. + """ + + output_uri = proto.Field(proto.STRING, number=1) + + +class ReservationAffinity(proto.Message): + r"""Reservation Affinity for consuming Zonal reservation. + + Attributes: + consume_reservation_type (~.gcd_clusters.ReservationAffinity.Type): + Optional. Type of reservation to consume + key (str): + Optional. Corresponds to the label key of + reservation resource. + values (Sequence[str]): + Optional. Corresponds to the label values of + reservation resource. + """ + + class Type(proto.Enum): + r"""Indicates whether to consume capacity from an reservation or + not. 
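+
+        A short sketch (the reservation label key and value below are
+        placeholders)::
+
+            from google.cloud.dataproc_v1beta2.types import clusters
+
+            affinity = clusters.ReservationAffinity(
+                consume_reservation_type=(
+                    clusters.ReservationAffinity.Type.SPECIFIC_RESERVATION
+                ),
+                key="compute.googleapis.com/reservation-name",
+                values=["my-reservation"],
+            )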
+ """ + TYPE_UNSPECIFIED = 0 + NO_RESERVATION = 1 + ANY_RESERVATION = 2 + SPECIFIC_RESERVATION = 3 + + consume_reservation_type = proto.Field(proto.ENUM, number=1, enum=Type,) + + key = proto.Field(proto.STRING, number=2) + + values = proto.RepeatedField(proto.STRING, number=3) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/dataproc_v1beta2/types/jobs.py b/google/cloud/dataproc_v1beta2/types/jobs.py new file mode 100644 index 00000000..b94e9a3c --- /dev/null +++ b/google/cloud/dataproc_v1beta2/types/jobs.py @@ -0,0 +1,1072 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.dataproc.v1beta2", + manifest={ + "LoggingConfig", + "HadoopJob", + "SparkJob", + "PySparkJob", + "QueryList", + "HiveJob", + "SparkSqlJob", + "PigJob", + "SparkRJob", + "PrestoJob", + "JobPlacement", + "JobStatus", + "JobReference", + "YarnApplication", + "Job", + "JobScheduling", + "JobMetadata", + "SubmitJobRequest", + "GetJobRequest", + "ListJobsRequest", + "UpdateJobRequest", + "ListJobsResponse", + "CancelJobRequest", + "DeleteJobRequest", + }, +) + + +class LoggingConfig(proto.Message): + r"""The runtime logging config of the job. + + Attributes: + driver_log_levels (Sequence[~.gcd_jobs.LoggingConfig.DriverLogLevelsEntry]): + The per-package log levels for the driver. + This may include "root" package name to + configure rootLogger. Examples: + 'com.google = FATAL', 'root = INFO', + 'org.apache = DEBUG' + """ + + class Level(proto.Enum): + r"""The Log4j level for job execution. When running an `Apache + Hive `__ job, Cloud Dataproc configures the + Hive client to an equivalent verbosity level. + """ + LEVEL_UNSPECIFIED = 0 + ALL = 1 + TRACE = 2 + DEBUG = 3 + INFO = 4 + WARN = 5 + ERROR = 6 + FATAL = 7 + OFF = 8 + + driver_log_levels = proto.MapField(proto.STRING, proto.ENUM, number=2, enum=Level,) + + +class HadoopJob(proto.Message): + r"""A Dataproc job for running `Apache Hadoop + MapReduce `__ + jobs on `Apache Hadoop + YARN `__. + + Attributes: + main_jar_file_uri (str): + The HCFS URI of the jar file containing the + main class. Examples: + 'gs://foo-bucket/analytics-binaries/extract- + useful-metrics-mr.jar' 'hdfs:/tmp/test- + samples/custom-wordcount.jar' + 'file:///home/usr/lib/hadoop-mapreduce/hadoop- + mapreduce-examples.jar' + main_class (str): + The name of the driver's main class. The jar file containing + the class must be in the default CLASSPATH or specified in + ``jar_file_uris``. + args (Sequence[str]): + Optional. The arguments to pass to the driver. Do not + include arguments, such as ``-libjars`` or ``-Dfoo=bar``, + that can be set as job properties, since a collision may + occur that causes an incorrect job submission. + jar_file_uris (Sequence[str]): + Optional. 
Jar file URIs to add to the + CLASSPATHs of the Hadoop driver and tasks. + file_uris (Sequence[str]): + Optional. HCFS (Hadoop Compatible Filesystem) + URIs of files to be copied to the working + directory of Hadoop drivers and distributed + tasks. Useful for naively parallel tasks. + archive_uris (Sequence[str]): + Optional. HCFS URIs of archives to be + extracted in the working directory of Hadoop + drivers and tasks. Supported file types: .jar, + .tar, .tar.gz, .tgz, or .zip. + properties (Sequence[~.gcd_jobs.HadoopJob.PropertiesEntry]): + Optional. A mapping of property names to values, used to + configure Hadoop. Properties that conflict with values set + by the Dataproc API may be overwritten. Can include + properties set in /etc/hadoop/conf/*-site and classes in + user code. + logging_config (~.gcd_jobs.LoggingConfig): + Optional. The runtime log config for job + execution. + """ + + main_jar_file_uri = proto.Field(proto.STRING, number=1, oneof="driver") + + main_class = proto.Field(proto.STRING, number=2, oneof="driver") + + args = proto.RepeatedField(proto.STRING, number=3) + + jar_file_uris = proto.RepeatedField(proto.STRING, number=4) + + file_uris = proto.RepeatedField(proto.STRING, number=5) + + archive_uris = proto.RepeatedField(proto.STRING, number=6) + + properties = proto.MapField(proto.STRING, proto.STRING, number=7) + + logging_config = proto.Field(proto.MESSAGE, number=8, message=LoggingConfig,) + + +class SparkJob(proto.Message): + r"""A Dataproc job for running `Apache + Spark `__ applications on YARN. The + specification of the main method to call to drive the job. Specify + either the jar file that contains the main class or the main class + name. To pass both a main jar and a main class in that jar, add the + jar to ``CommonJob.jar_file_uris``, and then specify the main class + name in ``main_class``. + + Attributes: + main_jar_file_uri (str): + The HCFS URI of the jar file that contains + the main class. + main_class (str): + The name of the driver's main class. The jar file that + contains the class must be in the default CLASSPATH or + specified in ``jar_file_uris``. + args (Sequence[str]): + Optional. The arguments to pass to the driver. Do not + include arguments, such as ``--conf``, that can be set as + job properties, since a collision may occur that causes an + incorrect job submission. + jar_file_uris (Sequence[str]): + Optional. HCFS URIs of jar files to add to + the CLASSPATHs of the Spark driver and tasks. + file_uris (Sequence[str]): + Optional. HCFS URIs of files to be copied to + the working directory of Spark drivers and + distributed tasks. Useful for naively parallel + tasks. + archive_uris (Sequence[str]): + Optional. HCFS URIs of archives to be + extracted in the working directory of Spark + drivers and tasks. Supported file types: .jar, + .tar, .tar.gz, .tgz, and .zip. + properties (Sequence[~.gcd_jobs.SparkJob.PropertiesEntry]): + Optional. A mapping of property names to + values, used to configure Spark. Properties that + conflict with values set by the Dataproc API may + be overwritten. Can include properties set in + /etc/spark/conf/spark-defaults.conf and classes + in user code. + logging_config (~.gcd_jobs.LoggingConfig): + Optional. The runtime log config for job + execution. 
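+
+        A submission sketch (illustrative only; the endpoint, project,
+        region, cluster name, and jar path are placeholders)::
+
+            from google.cloud import dataproc_v1beta2
+            from google.cloud.dataproc_v1beta2.types import jobs
+
+            job = jobs.Job(
+                placement=jobs.JobPlacement(cluster_name="example-cluster"),
+                spark_job=jobs.SparkJob(
+                    main_class="org.apache.spark.examples.SparkPi",
+                    jar_file_uris=[
+                        "file:///usr/lib/spark/examples/jars/spark-examples.jar"
+                    ],
+                    args=["1000"],
+                ),
+            )
+
+            client = dataproc_v1beta2.JobControllerClient(
+                client_options={"api_endpoint": "us-central1-dataproc.googleapis.com:443"}
+            )
+            submitted = client.submit_job(
+                project_id="my-project", region="us-central1", job=job
+            )
+            print(submitted.reference.job_id)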
+ """ + + main_jar_file_uri = proto.Field(proto.STRING, number=1, oneof="driver") + + main_class = proto.Field(proto.STRING, number=2, oneof="driver") + + args = proto.RepeatedField(proto.STRING, number=3) + + jar_file_uris = proto.RepeatedField(proto.STRING, number=4) + + file_uris = proto.RepeatedField(proto.STRING, number=5) + + archive_uris = proto.RepeatedField(proto.STRING, number=6) + + properties = proto.MapField(proto.STRING, proto.STRING, number=7) + + logging_config = proto.Field(proto.MESSAGE, number=8, message=LoggingConfig,) + + +class PySparkJob(proto.Message): + r"""A Dataproc job for running `Apache + PySpark `__ + applications on YARN. + + Attributes: + main_python_file_uri (str): + Required. The HCFS URI of the main Python + file to use as the driver. Must be a .py file. + args (Sequence[str]): + Optional. The arguments to pass to the driver. Do not + include arguments, such as ``--conf``, that can be set as + job properties, since a collision may occur that causes an + incorrect job submission. + python_file_uris (Sequence[str]): + Optional. HCFS file URIs of Python files to + pass to the PySpark framework. Supported file + types: .py, .egg, and .zip. + jar_file_uris (Sequence[str]): + Optional. HCFS URIs of jar files to add to + the CLASSPATHs of the Python driver and tasks. + file_uris (Sequence[str]): + Optional. HCFS URIs of files to be copied to + the working directory of Python drivers and + distributed tasks. Useful for naively parallel + tasks. + archive_uris (Sequence[str]): + Optional. HCFS URIs of archives to be + extracted in the working directory of .jar, + .tar, .tar.gz, .tgz, and .zip. + properties (Sequence[~.gcd_jobs.PySparkJob.PropertiesEntry]): + Optional. A mapping of property names to + values, used to configure PySpark. Properties + that conflict with values set by the Dataproc + API may be overwritten. Can include properties + set in + /etc/spark/conf/spark-defaults.conf and classes + in user code. + logging_config (~.gcd_jobs.LoggingConfig): + Optional. The runtime log config for job + execution. + """ + + main_python_file_uri = proto.Field(proto.STRING, number=1) + + args = proto.RepeatedField(proto.STRING, number=2) + + python_file_uris = proto.RepeatedField(proto.STRING, number=3) + + jar_file_uris = proto.RepeatedField(proto.STRING, number=4) + + file_uris = proto.RepeatedField(proto.STRING, number=5) + + archive_uris = proto.RepeatedField(proto.STRING, number=6) + + properties = proto.MapField(proto.STRING, proto.STRING, number=7) + + logging_config = proto.Field(proto.MESSAGE, number=8, message=LoggingConfig,) + + +class QueryList(proto.Message): + r"""A list of queries to run on a cluster. + + Attributes: + queries (Sequence[str]): + Required. The queries to execute. You do not need to + terminate a query with a semicolon. Multiple queries can be + specified in one string by separating each with a semicolon. + Here is an example of an Cloud Dataproc API snippet that + uses a QueryList to specify a HiveJob: + + :: + + "hiveJob": { + "queryList": { + "queries": [ + "query1", + "query2", + "query3;query4", + ] + } + } + """ + + queries = proto.RepeatedField(proto.STRING, number=1) + + +class HiveJob(proto.Message): + r"""A Dataproc job for running `Apache + Hive `__ queries on YARN. + + Attributes: + query_file_uri (str): + The HCFS URI of the script that contains Hive + queries. + query_list (~.gcd_jobs.QueryList): + A list of queries. + continue_on_failure (bool): + Optional. Whether to continue executing queries if a query + fails. 
The default value is ``false``. Setting to ``true`` + can be useful when executing independent parallel queries. + script_variables (Sequence[~.gcd_jobs.HiveJob.ScriptVariablesEntry]): + Optional. Mapping of query variable names to values + (equivalent to the Hive command: ``SET name="value";``). + properties (Sequence[~.gcd_jobs.HiveJob.PropertiesEntry]): + Optional. A mapping of property names and values, used to + configure Hive. Properties that conflict with values set by + the Dataproc API may be overwritten. Can include properties + set in /etc/hadoop/conf/*-site.xml, + /etc/hive/conf/hive-site.xml, and classes in user code. + jar_file_uris (Sequence[str]): + Optional. HCFS URIs of jar files to add to + the CLASSPATH of the Hive server and Hadoop + MapReduce (MR) tasks. Can contain Hive SerDes + and UDFs. + """ + + query_file_uri = proto.Field(proto.STRING, number=1, oneof="queries") + + query_list = proto.Field( + proto.MESSAGE, number=2, oneof="queries", message=QueryList, + ) + + continue_on_failure = proto.Field(proto.BOOL, number=3) + + script_variables = proto.MapField(proto.STRING, proto.STRING, number=4) + + properties = proto.MapField(proto.STRING, proto.STRING, number=5) + + jar_file_uris = proto.RepeatedField(proto.STRING, number=6) + + +class SparkSqlJob(proto.Message): + r"""A Dataproc job for running `Apache Spark + SQL `__ queries. + + Attributes: + query_file_uri (str): + The HCFS URI of the script that contains SQL + queries. + query_list (~.gcd_jobs.QueryList): + A list of queries. + script_variables (Sequence[~.gcd_jobs.SparkSqlJob.ScriptVariablesEntry]): + Optional. Mapping of query variable names to values + (equivalent to the Spark SQL command: SET + ``name="value";``). + properties (Sequence[~.gcd_jobs.SparkSqlJob.PropertiesEntry]): + Optional. A mapping of property names to + values, used to configure Spark SQL's SparkConf. + Properties that conflict with values set by the + Dataproc API may be overwritten. + jar_file_uris (Sequence[str]): + Optional. HCFS URIs of jar files to be added + to the Spark CLASSPATH. + logging_config (~.gcd_jobs.LoggingConfig): + Optional. The runtime log config for job + execution. + """ + + query_file_uri = proto.Field(proto.STRING, number=1, oneof="queries") + + query_list = proto.Field( + proto.MESSAGE, number=2, oneof="queries", message=QueryList, + ) + + script_variables = proto.MapField(proto.STRING, proto.STRING, number=3) + + properties = proto.MapField(proto.STRING, proto.STRING, number=4) + + jar_file_uris = proto.RepeatedField(proto.STRING, number=56) + + logging_config = proto.Field(proto.MESSAGE, number=6, message=LoggingConfig,) + + +class PigJob(proto.Message): + r"""A Dataproc job for running `Apache Pig `__ + queries on YARN. + + Attributes: + query_file_uri (str): + The HCFS URI of the script that contains the + Pig queries. + query_list (~.gcd_jobs.QueryList): + A list of queries. + continue_on_failure (bool): + Optional. Whether to continue executing queries if a query + fails. The default value is ``false``. Setting to ``true`` + can be useful when executing independent parallel queries. + script_variables (Sequence[~.gcd_jobs.PigJob.ScriptVariablesEntry]): + Optional. Mapping of query variable names to values + (equivalent to the Pig command: ``name=[value]``). + properties (Sequence[~.gcd_jobs.PigJob.PropertiesEntry]): + Optional. A mapping of property names to values, used to + configure Pig. Properties that conflict with values set by + the Dataproc API may be overwritten. 
Can include properties + set in /etc/hadoop/conf/*-site.xml, + /etc/pig/conf/pig.properties, and classes in user code. + jar_file_uris (Sequence[str]): + Optional. HCFS URIs of jar files to add to + the CLASSPATH of the Pig Client and Hadoop + MapReduce (MR) tasks. Can contain Pig UDFs. + logging_config (~.gcd_jobs.LoggingConfig): + Optional. The runtime log config for job + execution. + """ + + query_file_uri = proto.Field(proto.STRING, number=1, oneof="queries") + + query_list = proto.Field( + proto.MESSAGE, number=2, oneof="queries", message=QueryList, + ) + + continue_on_failure = proto.Field(proto.BOOL, number=3) + + script_variables = proto.MapField(proto.STRING, proto.STRING, number=4) + + properties = proto.MapField(proto.STRING, proto.STRING, number=5) + + jar_file_uris = proto.RepeatedField(proto.STRING, number=6) + + logging_config = proto.Field(proto.MESSAGE, number=7, message=LoggingConfig,) + + +class SparkRJob(proto.Message): + r"""A Dataproc job for running `Apache + SparkR `__ + applications on YARN. + + Attributes: + main_r_file_uri (str): + Required. The HCFS URI of the main R file to + use as the driver. Must be a .R file. + args (Sequence[str]): + Optional. The arguments to pass to the driver. Do not + include arguments, such as ``--conf``, that can be set as + job properties, since a collision may occur that causes an + incorrect job submission. + file_uris (Sequence[str]): + Optional. HCFS URIs of files to be copied to + the working directory of R drivers and + distributed tasks. Useful for naively parallel + tasks. + archive_uris (Sequence[str]): + Optional. HCFS URIs of archives to be + extracted in the working directory of Spark + drivers and tasks. Supported file types: .jar, + .tar, .tar.gz, .tgz, and .zip. + properties (Sequence[~.gcd_jobs.SparkRJob.PropertiesEntry]): + Optional. A mapping of property names to + values, used to configure SparkR. Properties + that conflict with values set by the Dataproc + API may be overwritten. Can include properties + set in + /etc/spark/conf/spark-defaults.conf and classes + in user code. + logging_config (~.gcd_jobs.LoggingConfig): + Optional. The runtime log config for job + execution. + """ + + main_r_file_uri = proto.Field(proto.STRING, number=1) + + args = proto.RepeatedField(proto.STRING, number=2) + + file_uris = proto.RepeatedField(proto.STRING, number=3) + + archive_uris = proto.RepeatedField(proto.STRING, number=4) + + properties = proto.MapField(proto.STRING, proto.STRING, number=5) + + logging_config = proto.Field(proto.MESSAGE, number=6, message=LoggingConfig,) + + +class PrestoJob(proto.Message): + r"""A Dataproc job for running `Presto `__ + queries. **IMPORTANT**: The `Dataproc Presto Optional + Component `__ + must be enabled when the cluster is created to submit a Presto job + to the cluster. + + Attributes: + query_file_uri (str): + The HCFS URI of the script that contains SQL + queries. + query_list (~.gcd_jobs.QueryList): + A list of queries. + continue_on_failure (bool): + Optional. Whether to continue executing queries if a query + fails. The default value is ``false``. Setting to ``true`` + can be useful when executing independent parallel queries. + output_format (str): + Optional. The format in which query output + will be displayed. See the Presto documentation + for supported output formats + client_tags (Sequence[str]): + Optional. Presto client tags to attach to + this query + properties (Sequence[~.gcd_jobs.PrestoJob.PropertiesEntry]): + Optional. A mapping of property names to values. 
Used to set + Presto `session + properties `__ + Equivalent to using the --session flag in the Presto CLI + logging_config (~.gcd_jobs.LoggingConfig): + Optional. The runtime log config for job + execution. + """ + + query_file_uri = proto.Field(proto.STRING, number=1, oneof="queries") + + query_list = proto.Field( + proto.MESSAGE, number=2, oneof="queries", message=QueryList, + ) + + continue_on_failure = proto.Field(proto.BOOL, number=3) + + output_format = proto.Field(proto.STRING, number=4) + + client_tags = proto.RepeatedField(proto.STRING, number=5) + + properties = proto.MapField(proto.STRING, proto.STRING, number=6) + + logging_config = proto.Field(proto.MESSAGE, number=7, message=LoggingConfig,) + + +class JobPlacement(proto.Message): + r"""Dataproc job config. + + Attributes: + cluster_name (str): + Required. The name of the cluster where the + job will be submitted. + cluster_uuid (str): + Output only. A cluster UUID generated by the + Dataproc service when the job is submitted. + """ + + cluster_name = proto.Field(proto.STRING, number=1) + + cluster_uuid = proto.Field(proto.STRING, number=2) + + +class JobStatus(proto.Message): + r"""Dataproc job status. + + Attributes: + state (~.gcd_jobs.JobStatus.State): + Output only. A state message specifying the + overall job state. + details (str): + Output only. Optional Job state details, such + as an error description if the state is + ERROR. + state_start_time (~.timestamp.Timestamp): + Output only. The time when this state was + entered. + substate (~.gcd_jobs.JobStatus.Substate): + Output only. Additional state information, + which includes status reported by the agent. + """ + + class State(proto.Enum): + r"""The job state.""" + STATE_UNSPECIFIED = 0 + PENDING = 1 + SETUP_DONE = 8 + RUNNING = 2 + CANCEL_PENDING = 3 + CANCEL_STARTED = 7 + CANCELLED = 4 + DONE = 5 + ERROR = 6 + ATTEMPT_FAILURE = 9 + + class Substate(proto.Enum): + r"""The job substate.""" + UNSPECIFIED = 0 + SUBMITTED = 1 + QUEUED = 2 + STALE_STATUS = 3 + + state = proto.Field(proto.ENUM, number=1, enum=State,) + + details = proto.Field(proto.STRING, number=2) + + state_start_time = proto.Field( + proto.MESSAGE, number=6, message=timestamp.Timestamp, + ) + + substate = proto.Field(proto.ENUM, number=7, enum=Substate,) + + +class JobReference(proto.Message): + r"""Encapsulates the full scoping used to reference a job. + + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the job belongs to. + job_id (str): + Optional. The job ID, which must be unique within the + project. The ID must contain only letters (a-z, A-Z), + numbers (0-9), underscores (_), or hyphens (-). The maximum + length is 100 characters. + + If not specified by the caller, the job ID will be provided + by the server. + """ + + project_id = proto.Field(proto.STRING, number=1) + + job_id = proto.Field(proto.STRING, number=2) + + +class YarnApplication(proto.Message): + r"""A YARN application created by a job. Application information is a + subset of + org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. + + **Beta Feature**: This report is available for testing purposes + only. It may be changed before final release. + + Attributes: + name (str): + Output only. The application name. + state (~.gcd_jobs.YarnApplication.State): + Output only. The application state. + progress (float): + Output only. The numerical progress of the + application, from 1 to 100. + tracking_url (str): + Output only. 
The HTTP URL of the + ApplicationMaster, HistoryServer, or + TimelineServer that provides application- + specific information. The URL uses the internal + hostname, and requires a proxy server for + resolution and, possibly, access. + """ + + class State(proto.Enum): + r"""The application state, corresponding to + YarnProtos.YarnApplicationStateProto. + """ + STATE_UNSPECIFIED = 0 + NEW = 1 + NEW_SAVING = 2 + SUBMITTED = 3 + ACCEPTED = 4 + RUNNING = 5 + FINISHED = 6 + FAILED = 7 + KILLED = 8 + + name = proto.Field(proto.STRING, number=1) + + state = proto.Field(proto.ENUM, number=2, enum=State,) + + progress = proto.Field(proto.FLOAT, number=3) + + tracking_url = proto.Field(proto.STRING, number=4) + + +class Job(proto.Message): + r"""A Dataproc job resource. + + Attributes: + reference (~.gcd_jobs.JobReference): + Optional. The fully qualified reference to the job, which + can be used to obtain the equivalent REST path of the job + resource. If this property is not specified when a job is + created, the server generates a job_id. + placement (~.gcd_jobs.JobPlacement): + Required. Job information, including how, + when, and where to run the job. + hadoop_job (~.gcd_jobs.HadoopJob): + Optional. Job is a Hadoop job. + spark_job (~.gcd_jobs.SparkJob): + Optional. Job is a Spark job. + pyspark_job (~.gcd_jobs.PySparkJob): + Optional. Job is a PySpark job. + hive_job (~.gcd_jobs.HiveJob): + Optional. Job is a Hive job. + pig_job (~.gcd_jobs.PigJob): + Optional. Job is a Pig job. + spark_r_job (~.gcd_jobs.SparkRJob): + Optional. Job is a SparkR job. + spark_sql_job (~.gcd_jobs.SparkSqlJob): + Optional. Job is a SparkSql job. + presto_job (~.gcd_jobs.PrestoJob): + Optional. Job is a Presto job. + status (~.gcd_jobs.JobStatus): + Output only. The job status. Additional application-specific + status information may be contained in the type_job and + yarn_applications fields. + status_history (Sequence[~.gcd_jobs.JobStatus]): + Output only. The previous job status. + yarn_applications (Sequence[~.gcd_jobs.YarnApplication]): + Output only. The collection of YARN applications spun up by + this job. + + **Beta** Feature: This report is available for testing + purposes only. It may be changed before final release. + submitted_by (str): + Output only. The email address of the user + submitting the job. For jobs submitted on the + cluster, the address is + username@hostname. + driver_output_resource_uri (str): + Output only. A URI pointing to the location + of the stdout of the job's driver program. + driver_control_files_uri (str): + Output only. If present, the location of miscellaneous + control files which may be used as part of job setup and + handling. If not present, control files may be placed in the + same location as ``driver_output_uri``. + labels (Sequence[~.gcd_jobs.Job.LabelsEntry]): + Optional. The labels to associate with this job. Label + **keys** must contain 1 to 63 characters, and must conform + to `RFC 1035 `__. + Label **values** may be empty, but, if present, must contain + 1 to 63 characters, and must conform to `RFC + 1035 `__. No more than + 32 labels can be associated with a job. + scheduling (~.gcd_jobs.JobScheduling): + Optional. Job scheduling configuration. + job_uuid (str): + Output only. A UUID that uniquely identifies a job within + the project over time. This is in contrast to a + user-settable reference.job_id that may be reused over time. + done (bool): + Output only. Indicates whether the job is completed. If the + value is ``false``, the job is still in progress. 
If + ``true``, the job is completed, and ``status.state`` field + will indicate if it was successful, failed, or cancelled. + """ + + reference = proto.Field(proto.MESSAGE, number=1, message=JobReference,) + + placement = proto.Field(proto.MESSAGE, number=2, message=JobPlacement,) + + hadoop_job = proto.Field( + proto.MESSAGE, number=3, oneof="type_job", message=HadoopJob, + ) + + spark_job = proto.Field( + proto.MESSAGE, number=4, oneof="type_job", message=SparkJob, + ) + + pyspark_job = proto.Field( + proto.MESSAGE, number=5, oneof="type_job", message=PySparkJob, + ) + + hive_job = proto.Field(proto.MESSAGE, number=6, oneof="type_job", message=HiveJob,) + + pig_job = proto.Field(proto.MESSAGE, number=7, oneof="type_job", message=PigJob,) + + spark_r_job = proto.Field( + proto.MESSAGE, number=21, oneof="type_job", message=SparkRJob, + ) + + spark_sql_job = proto.Field( + proto.MESSAGE, number=12, oneof="type_job", message=SparkSqlJob, + ) + + presto_job = proto.Field( + proto.MESSAGE, number=23, oneof="type_job", message=PrestoJob, + ) + + status = proto.Field(proto.MESSAGE, number=8, message=JobStatus,) + + status_history = proto.RepeatedField(proto.MESSAGE, number=13, message=JobStatus,) + + yarn_applications = proto.RepeatedField( + proto.MESSAGE, number=9, message=YarnApplication, + ) + + submitted_by = proto.Field(proto.STRING, number=10) + + driver_output_resource_uri = proto.Field(proto.STRING, number=17) + + driver_control_files_uri = proto.Field(proto.STRING, number=15) + + labels = proto.MapField(proto.STRING, proto.STRING, number=18) + + scheduling = proto.Field(proto.MESSAGE, number=20, message="JobScheduling",) + + job_uuid = proto.Field(proto.STRING, number=22) + + done = proto.Field(proto.BOOL, number=24) + + +class JobScheduling(proto.Message): + r"""Job scheduling options. + + Attributes: + max_failures_per_hour (int): + Optional. Maximum number of times per hour a + driver may be restarted as a result of driver + terminating with non-zero code before job is + reported failed. + + A job may be reported as thrashing if driver + exits with non-zero code 4 times within 10 + minute window. + + Maximum value is 10. + """ + + max_failures_per_hour = proto.Field(proto.INT32, number=1) + + +class JobMetadata(proto.Message): + r"""Job Operation metadata. + + Attributes: + job_id (str): + Output only. The job id. + status (~.gcd_jobs.JobStatus): + Output only. Most recent job status. + operation_type (str): + Output only. Operation type. + start_time (~.timestamp.Timestamp): + Output only. Job submission time. + """ + + job_id = proto.Field(proto.STRING, number=1) + + status = proto.Field(proto.MESSAGE, number=2, message=JobStatus,) + + operation_type = proto.Field(proto.STRING, number=3) + + start_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,) + + +class SubmitJobRequest(proto.Message): + r"""A request to submit a job. + + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the job belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + job (~.gcd_jobs.Job): + Required. The job resource. + request_id (str): + Optional. A unique id used to identify the request. If the + server receives two + [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] + requests with the same id, then the second request will be + ignored and the first + [Job][google.cloud.dataproc.v1beta2.Job] created and stored + in the backend is returned. 
+ + It is recommended to always set this value to a + `UUID `__. + + The id must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). The maximum length is 40 + characters. + """ + + project_id = proto.Field(proto.STRING, number=1) + + region = proto.Field(proto.STRING, number=3) + + job = proto.Field(proto.MESSAGE, number=2, message=Job,) + + request_id = proto.Field(proto.STRING, number=4) + + +class GetJobRequest(proto.Message): + r"""A request to get the resource representation for a job in a + project. + + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the job belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + job_id (str): + Required. The job ID. + """ + + project_id = proto.Field(proto.STRING, number=1) + + region = proto.Field(proto.STRING, number=3) + + job_id = proto.Field(proto.STRING, number=2) + + +class ListJobsRequest(proto.Message): + r"""A request to list jobs in a project. + + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the job belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + page_size (int): + Optional. The number of results to return in + each response. + page_token (str): + Optional. The page token, returned by a + previous call, to request the next page of + results. + cluster_name (str): + Optional. If set, the returned jobs list + includes only jobs that were submitted to the + named cluster. + job_state_matcher (~.gcd_jobs.ListJobsRequest.JobStateMatcher): + Optional. Specifies enumerated categories of jobs to list. + (default = match ALL jobs). + + If ``filter`` is provided, ``jobStateMatcher`` will be + ignored. + filter (str): + Optional. A filter constraining the jobs to list. Filters + are case-sensitive and have the following syntax: + + [field = value] AND [field [= value]] ... + + where **field** is ``status.state`` or ``labels.[KEY]``, and + ``[KEY]`` is a label key. **value** can be ``*`` to match + all values. ``status.state`` can be either ``ACTIVE`` or + ``NON_ACTIVE``. Only the logical ``AND`` operator is + supported; space-separated items are treated as having an + implicit ``AND`` operator. + + Example filter: + + status.state = ACTIVE AND labels.env = staging AND + labels.starred = \* + """ + + class JobStateMatcher(proto.Enum): + r"""A matcher that specifies categories of job states.""" + ALL = 0 + ACTIVE = 1 + NON_ACTIVE = 2 + + project_id = proto.Field(proto.STRING, number=1) + + region = proto.Field(proto.STRING, number=6) + + page_size = proto.Field(proto.INT32, number=2) + + page_token = proto.Field(proto.STRING, number=3) + + cluster_name = proto.Field(proto.STRING, number=4) + + job_state_matcher = proto.Field(proto.ENUM, number=5, enum=JobStateMatcher,) + + filter = proto.Field(proto.STRING, number=7) + + +class UpdateJobRequest(proto.Message): + r"""A request to update a job. + + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the job belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + job_id (str): + Required. The job ID. + job (~.gcd_jobs.Job): + Required. The changes to the job. + update_mask (~.field_mask.FieldMask): + Required. Specifies the path, relative to Job, of the field + to update. 
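A minimal usage sketch of the ListJobsRequest filter syntax documented above, assuming the v1beta2 JobControllerClient is exported from the package root and accepts the same request-mapping call style as the updated v1 samples later in this change; the endpoint, project, and label values are illustrative placeholders:

from google.cloud import dataproc_v1beta2 as dataproc

def list_active_staging_jobs(project_id, region):
    # Regional endpoint, following the pattern used by the samples in this change.
    job_client = dataproc.JobControllerClient(
        client_options={"api_endpoint": f"{region}-dataproc.googleapis.com:443"}
    )
    # When ``filter`` is set, ``job_state_matcher`` is ignored (see ListJobsRequest above).
    for job in job_client.list_jobs(
        request={
            "project_id": project_id,
            "region": region,
            "filter": "status.state = ACTIVE AND labels.env = staging",
        }
    ):
        print(job.reference.job_id, job.status.state)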
For example, to update the labels of a Job the + update_mask parameter would be specified as labels, and the + ``PATCH`` request body would specify the new value. Note: + Currently, labels is the only field that can be updated. + """ + + project_id = proto.Field(proto.STRING, number=1) + + region = proto.Field(proto.STRING, number=2) + + job_id = proto.Field(proto.STRING, number=3) + + job = proto.Field(proto.MESSAGE, number=4, message=Job,) + + update_mask = proto.Field(proto.MESSAGE, number=5, message=field_mask.FieldMask,) + + +class ListJobsResponse(proto.Message): + r"""A list of jobs in a project. + + Attributes: + jobs (Sequence[~.gcd_jobs.Job]): + Output only. Jobs list. + next_page_token (str): + Optional. This token is included in the response if there + are more results to fetch. To fetch additional results, + provide this value as the ``page_token`` in a subsequent + ListJobsRequest. + """ + + @property + def raw_page(self): + return self + + jobs = proto.RepeatedField(proto.MESSAGE, number=1, message=Job,) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class CancelJobRequest(proto.Message): + r"""A request to cancel a job. + + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the job belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + job_id (str): + Required. The job ID. + """ + + project_id = proto.Field(proto.STRING, number=1) + + region = proto.Field(proto.STRING, number=3) + + job_id = proto.Field(proto.STRING, number=2) + + +class DeleteJobRequest(proto.Message): + r"""A request to delete a job. + + Attributes: + project_id (str): + Required. The ID of the Google Cloud Platform + project that the job belongs to. + region (str): + Required. The Dataproc region in which to + handle the request. + job_id (str): + Required. The job ID. + """ + + project_id = proto.Field(proto.STRING, number=1) + + region = proto.Field(proto.STRING, number=3) + + job_id = proto.Field(proto.STRING, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/dataproc_v1beta2/types/operations.py b/google/cloud/dataproc_v1beta2/types/operations.py new file mode 100644 index 00000000..b43dc854 --- /dev/null +++ b/google/cloud/dataproc_v1beta2/types/operations.py @@ -0,0 +1,109 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.dataproc.v1beta2", + manifest={"ClusterOperationStatus", "ClusterOperationMetadata",}, +) + + +class ClusterOperationStatus(proto.Message): + r"""The status of the operation. + + Attributes: + state (~.operations.ClusterOperationStatus.State): + Output only. A message containing the + operation state. + inner_state (str): + Output only. A message containing the + detailed operation state. + details (str): + Output only. 
A message containing any + operation metadata details. + state_start_time (~.timestamp.Timestamp): + Output only. The time this state was entered. + """ + + class State(proto.Enum): + r"""The operation state.""" + UNKNOWN = 0 + PENDING = 1 + RUNNING = 2 + DONE = 3 + + state = proto.Field(proto.ENUM, number=1, enum=State,) + + inner_state = proto.Field(proto.STRING, number=2) + + details = proto.Field(proto.STRING, number=3) + + state_start_time = proto.Field( + proto.MESSAGE, number=4, message=timestamp.Timestamp, + ) + + +class ClusterOperationMetadata(proto.Message): + r"""Metadata describing the operation. + + Attributes: + cluster_name (str): + Output only. Name of the cluster for the + operation. + cluster_uuid (str): + Output only. Cluster UUID for the operation. + status (~.operations.ClusterOperationStatus): + Output only. Current operation status. + status_history (Sequence[~.operations.ClusterOperationStatus]): + Output only. The previous operation status. + operation_type (str): + Output only. The operation type. + description (str): + Output only. Short description of operation. + labels (Sequence[~.operations.ClusterOperationMetadata.LabelsEntry]): + Output only. Labels associated with the + operation + warnings (Sequence[str]): + Output only. Errors encountered during + operation execution. + """ + + cluster_name = proto.Field(proto.STRING, number=7) + + cluster_uuid = proto.Field(proto.STRING, number=8) + + status = proto.Field(proto.MESSAGE, number=9, message=ClusterOperationStatus,) + + status_history = proto.RepeatedField( + proto.MESSAGE, number=10, message=ClusterOperationStatus, + ) + + operation_type = proto.Field(proto.STRING, number=11) + + description = proto.Field(proto.STRING, number=12) + + labels = proto.MapField(proto.STRING, proto.STRING, number=13) + + warnings = proto.RepeatedField(proto.STRING, number=14) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/dataproc_v1beta2/types/shared.py b/google/cloud/dataproc_v1beta2/types/shared.py new file mode 100644 index 00000000..d1c3e288 --- /dev/null +++ b/google/cloud/dataproc_v1beta2/types/shared.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
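The ClusterOperationStatus and ClusterOperationMetadata messages above surface as long-running-operation metadata on the cluster RPCs. A hedged sketch, assuming the standard google.api_core operation surface and the request-mapping call style used by the samples in this change; names and values are illustrative:

from google.cloud import dataproc_v1beta2 as dataproc

def create_cluster_and_report(project_id, region, cluster):
    cluster_client = dataproc.ClusterControllerClient(
        client_options={"api_endpoint": f"{region}-dataproc.googleapis.com:443"}
    )
    operation = cluster_client.create_cluster(
        request={"project_id": project_id, "region": region, "cluster": cluster}
    )
    # While the operation runs, its metadata (when populated by the server)
    # deserializes to ClusterOperationMetadata, so status and warnings can be
    # inspected before calling result().
    metadata = operation.metadata
    if metadata is not None:
        print(metadata.cluster_name, metadata.status.state, list(metadata.warnings))
    return operation.result()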
+# + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.dataproc.v1beta2", manifest={"Component",}, +) + + +class Component(proto.Enum): + r"""Cluster components that can be activated.""" + COMPONENT_UNSPECIFIED = 0 + ANACONDA = 5 + DOCKER = 13 + DRUID = 9 + FLINK = 14 + HIVE_WEBHCAT = 3 + JUPYTER = 1 + KERBEROS = 7 + PRESTO = 6 + RANGER = 12 + SOLR = 10 + ZEPPELIN = 4 + ZOOKEEPER = 8 + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/dataproc_v1beta2/types/workflow_templates.py b/google/cloud/dataproc_v1beta2/types/workflow_templates.py new file mode 100644 index 00000000..50319c8b --- /dev/null +++ b/google/cloud/dataproc_v1beta2/types/workflow_templates.py @@ -0,0 +1,849 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import proto # type: ignore + + +from google.cloud.dataproc_v1beta2.types import clusters +from google.cloud.dataproc_v1beta2.types import jobs as gcd_jobs +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.dataproc.v1beta2", + manifest={ + "WorkflowTemplate", + "WorkflowTemplatePlacement", + "ManagedCluster", + "ClusterSelector", + "OrderedJob", + "TemplateParameter", + "ParameterValidation", + "RegexValidation", + "ValueValidation", + "WorkflowMetadata", + "ClusterOperation", + "WorkflowGraph", + "WorkflowNode", + "CreateWorkflowTemplateRequest", + "GetWorkflowTemplateRequest", + "InstantiateWorkflowTemplateRequest", + "InstantiateInlineWorkflowTemplateRequest", + "UpdateWorkflowTemplateRequest", + "ListWorkflowTemplatesRequest", + "ListWorkflowTemplatesResponse", + "DeleteWorkflowTemplateRequest", + }, +) + + +class WorkflowTemplate(proto.Message): + r"""A Dataproc workflow template resource. + + Attributes: + id (str): + Required. The template id. + + The id must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). Cannot begin or end with + underscore or hyphen. Must consist of between 3 and 50 + characters. + + . + name (str): + Output only. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates``, the resource + name of the template has the following format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates``, the + resource name of the template has the following format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + version (int): + Optional. Used to perform a consistent read-modify-write. + + This field should be left blank for a + ``CreateWorkflowTemplate`` request. It is required for an + ``UpdateWorkflowTemplate`` request, and must match the + current server version. 
A typical update template flow would + fetch the current template with a ``GetWorkflowTemplate`` + request, which will return the current template with the + ``version`` field filled in with the current server version. + The user updates other fields in the template, then returns + it as part of the ``UpdateWorkflowTemplate`` request. + create_time (~.timestamp.Timestamp): + Output only. The time template was created. + update_time (~.timestamp.Timestamp): + Output only. The time template was last + updated. + labels (Sequence[~.workflow_templates.WorkflowTemplate.LabelsEntry]): + Optional. The labels to associate with this template. These + labels will be propagated to all jobs and clusters created + by the workflow instance. + + Label **keys** must contain 1 to 63 characters, and must + conform to `RFC + 1035 `__. + + Label **values** may be empty, but, if present, must contain + 1 to 63 characters, and must conform to `RFC + 1035 `__. + + No more than 32 labels can be associated with a template. + placement (~.workflow_templates.WorkflowTemplatePlacement): + Required. WorkflowTemplate scheduling + information. + jobs (Sequence[~.workflow_templates.OrderedJob]): + Required. The Directed Acyclic Graph of Jobs + to submit. + parameters (Sequence[~.workflow_templates.TemplateParameter]): + Optional. Template parameters whose values + are substituted into the template. Values for + parameters must be provided when the template is + instantiated. + """ + + id = proto.Field(proto.STRING, number=2) + + name = proto.Field(proto.STRING, number=1) + + version = proto.Field(proto.INT32, number=3) + + create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,) + + update_time = proto.Field(proto.MESSAGE, number=5, message=timestamp.Timestamp,) + + labels = proto.MapField(proto.STRING, proto.STRING, number=6) + + placement = proto.Field( + proto.MESSAGE, number=7, message="WorkflowTemplatePlacement", + ) + + jobs = proto.RepeatedField(proto.MESSAGE, number=8, message="OrderedJob",) + + parameters = proto.RepeatedField( + proto.MESSAGE, number=9, message="TemplateParameter", + ) + + +class WorkflowTemplatePlacement(proto.Message): + r"""Specifies workflow execution target. + + Either ``managed_cluster`` or ``cluster_selector`` is required. + + Attributes: + managed_cluster (~.workflow_templates.ManagedCluster): + Optional. A cluster that is managed by the + workflow. + cluster_selector (~.workflow_templates.ClusterSelector): + Optional. A selector that chooses target + cluster for jobs based on metadata. + + The selector is evaluated at the time each job + is submitted. + """ + + managed_cluster = proto.Field( + proto.MESSAGE, number=1, oneof="placement", message="ManagedCluster", + ) + + cluster_selector = proto.Field( + proto.MESSAGE, number=2, oneof="placement", message="ClusterSelector", + ) + + +class ManagedCluster(proto.Message): + r"""Cluster that is managed by the workflow. + + Attributes: + cluster_name (str): + Required. The cluster name prefix. A unique + cluster name will be formed by appending a + random suffix. + The name must contain only lower-case letters + (a-z), numbers (0-9), and hyphens (-). Must + begin with a letter. Cannot begin or end with + hyphen. Must consist of between 2 and 35 + characters. + config (~.clusters.ClusterConfig): + Required. The cluster configuration. + labels (Sequence[~.workflow_templates.ManagedCluster.LabelsEntry]): + Optional. The labels to associate with this cluster. 
+ + Label keys must be between 1 and 63 characters long, and + must conform to the following PCRE regular expression: + [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} + + Label values must be between 1 and 63 characters long, and + must conform to the following PCRE regular expression: + [\p{Ll}\p{Lo}\p{N}_-]{0,63} + + No more than 32 labels can be associated with a given + cluster. + """ + + cluster_name = proto.Field(proto.STRING, number=2) + + config = proto.Field(proto.MESSAGE, number=3, message=clusters.ClusterConfig,) + + labels = proto.MapField(proto.STRING, proto.STRING, number=4) + + +class ClusterSelector(proto.Message): + r"""A selector that chooses target cluster for jobs based on + metadata. + + Attributes: + zone (str): + Optional. The zone where workflow process + executes. This parameter does not affect the + selection of the cluster. + If unspecified, the zone of the first cluster + matching the selector is used. + cluster_labels (Sequence[~.workflow_templates.ClusterSelector.ClusterLabelsEntry]): + Required. The cluster labels. Cluster must + have all labels to match. + """ + + zone = proto.Field(proto.STRING, number=1) + + cluster_labels = proto.MapField(proto.STRING, proto.STRING, number=2) + + +class OrderedJob(proto.Message): + r"""A job executed by the workflow. + + Attributes: + step_id (str): + Required. The step id. The id must be unique among all jobs + within the template. + + The step id is used as prefix for job id, as job + ``goog-dataproc-workflow-step-id`` label, and in + [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] + field from other steps. + + The id must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). Cannot begin or end with + underscore or hyphen. Must consist of between 3 and 50 + characters. + hadoop_job (~.gcd_jobs.HadoopJob): + + spark_job (~.gcd_jobs.SparkJob): + + pyspark_job (~.gcd_jobs.PySparkJob): + + hive_job (~.gcd_jobs.HiveJob): + + pig_job (~.gcd_jobs.PigJob): + + spark_r_job (~.gcd_jobs.SparkRJob): + Spark R job + spark_sql_job (~.gcd_jobs.SparkSqlJob): + + presto_job (~.gcd_jobs.PrestoJob): + Presto job + labels (Sequence[~.workflow_templates.OrderedJob.LabelsEntry]): + Optional. The labels to associate with this job. + + Label keys must be between 1 and 63 characters long, and + must conform to the following regular expression: + [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} + + Label values must be between 1 and 63 characters long, and + must conform to the following regular expression: + [\p{Ll}\p{Lo}\p{N}_-]{0,63} + + No more than 32 labels can be associated with a given job. + scheduling (~.gcd_jobs.JobScheduling): + Optional. Job scheduling configuration. + prerequisite_step_ids (Sequence[str]): + Optional. The optional list of prerequisite job step_ids. If + not specified, the job will start at the beginning of + workflow. 
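A hedged sketch of the read-modify-write flow described on WorkflowTemplate.version above, assuming the v1beta2 WorkflowTemplateServiceClient accepts request mappings like the updated samples in this change; the project, region, template id, and label values are placeholders:

from google.cloud import dataproc_v1beta2 as dataproc

def add_env_label(project_id, region, template_id):
    client = dataproc.WorkflowTemplateServiceClient(
        client_options={"api_endpoint": f"{region}-dataproc.googleapis.com:443"}
    )
    name = f"projects/{project_id}/regions/{region}/workflowTemplates/{template_id}"
    # GetWorkflowTemplate returns the template with ``version`` set to the
    # current server version.
    template = client.get_workflow_template(request={"name": name})
    template.labels["env"] = "staging"
    # Returning the fetched ``version`` unchanged makes the update fail if the
    # template was modified concurrently.
    return client.update_workflow_template(request={"template": template})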
+ """ + + step_id = proto.Field(proto.STRING, number=1) + + hadoop_job = proto.Field( + proto.MESSAGE, number=2, oneof="job_type", message=gcd_jobs.HadoopJob, + ) + + spark_job = proto.Field( + proto.MESSAGE, number=3, oneof="job_type", message=gcd_jobs.SparkJob, + ) + + pyspark_job = proto.Field( + proto.MESSAGE, number=4, oneof="job_type", message=gcd_jobs.PySparkJob, + ) + + hive_job = proto.Field( + proto.MESSAGE, number=5, oneof="job_type", message=gcd_jobs.HiveJob, + ) + + pig_job = proto.Field( + proto.MESSAGE, number=6, oneof="job_type", message=gcd_jobs.PigJob, + ) + + spark_r_job = proto.Field( + proto.MESSAGE, number=11, oneof="job_type", message=gcd_jobs.SparkRJob, + ) + + spark_sql_job = proto.Field( + proto.MESSAGE, number=7, oneof="job_type", message=gcd_jobs.SparkSqlJob, + ) + + presto_job = proto.Field( + proto.MESSAGE, number=12, oneof="job_type", message=gcd_jobs.PrestoJob, + ) + + labels = proto.MapField(proto.STRING, proto.STRING, number=8) + + scheduling = proto.Field(proto.MESSAGE, number=9, message=gcd_jobs.JobScheduling,) + + prerequisite_step_ids = proto.RepeatedField(proto.STRING, number=10) + + +class TemplateParameter(proto.Message): + r"""A configurable parameter that replaces one or more fields in + the template. Parameterizable fields: + - Labels + - File uris + - Job properties + - Job arguments + - Script variables + - Main class (in HadoopJob and SparkJob) + - Zone (in ClusterSelector) + + Attributes: + name (str): + Required. Parameter name. The parameter name is used as the + key, and paired with the parameter value, which are passed + to the template when the template is instantiated. The name + must contain only capital letters (A-Z), numbers (0-9), and + underscores (_), and must not start with a number. The + maximum length is 40 characters. + fields (Sequence[str]): + Required. Paths to all fields that the parameter replaces. A + field is allowed to appear in at most one parameter's list + of field paths. + + A field path is similar in syntax to a + [google.protobuf.FieldMask][google.protobuf.FieldMask]. For + example, a field path that references the zone field of a + workflow template's cluster selector would be specified as + ``placement.clusterSelector.zone``. + + Also, field paths can reference fields using the following + syntax: + + - Values in maps can be referenced by key: + + - labels['key'] + - placement.clusterSelector.clusterLabels['key'] + - placement.managedCluster.labels['key'] + - placement.clusterSelector.clusterLabels['key'] + - jobs['step-id'].labels['key'] + + - Jobs in the jobs list can be referenced by step-id: + + - jobs['step-id'].hadoopJob.mainJarFileUri + - jobs['step-id'].hiveJob.queryFileUri + - jobs['step-id'].pySparkJob.mainPythonFileUri + - jobs['step-id'].hadoopJob.jarFileUris[0] + - jobs['step-id'].hadoopJob.archiveUris[0] + - jobs['step-id'].hadoopJob.fileUris[0] + - jobs['step-id'].pySparkJob.pythonFileUris[0] + + - Items in repeated fields can be referenced by a + zero-based index: + + - jobs['step-id'].sparkJob.args[0] + + - Other examples: + + - jobs['step-id'].hadoopJob.properties['key'] + - jobs['step-id'].hadoopJob.args[0] + - jobs['step-id'].hiveJob.scriptVariables['key'] + - jobs['step-id'].hadoopJob.mainJarFileUri + - placement.clusterSelector.zone + + It may not be possible to parameterize maps and repeated + fields in their entirety since only individual map values + and individual items in repeated fields can be referenced. 
+ For example, the following field paths are invalid: + + - placement.clusterSelector.clusterLabels + - jobs['step-id'].sparkJob.args + description (str): + Optional. Brief description of the parameter. + Must not exceed 1024 characters. + validation (~.workflow_templates.ParameterValidation): + Optional. Validation rules to be applied to + this parameter's value. + """ + + name = proto.Field(proto.STRING, number=1) + + fields = proto.RepeatedField(proto.STRING, number=2) + + description = proto.Field(proto.STRING, number=3) + + validation = proto.Field(proto.MESSAGE, number=4, message="ParameterValidation",) + + +class ParameterValidation(proto.Message): + r"""Configuration for parameter validation. + + Attributes: + regex (~.workflow_templates.RegexValidation): + Validation based on regular expressions. + values (~.workflow_templates.ValueValidation): + Validation based on a list of allowed values. + """ + + regex = proto.Field( + proto.MESSAGE, number=1, oneof="validation_type", message="RegexValidation", + ) + + values = proto.Field( + proto.MESSAGE, number=2, oneof="validation_type", message="ValueValidation", + ) + + +class RegexValidation(proto.Message): + r"""Validation based on regular expressions. + + Attributes: + regexes (Sequence[str]): + Required. RE2 regular expressions used to + validate the parameter's value. The value must + match the regex in its entirety (substring + matches are not sufficient). + """ + + regexes = proto.RepeatedField(proto.STRING, number=1) + + +class ValueValidation(proto.Message): + r"""Validation based on a list of allowed values. + + Attributes: + values (Sequence[str]): + Required. List of allowed values for the + parameter. + """ + + values = proto.RepeatedField(proto.STRING, number=1) + + +class WorkflowMetadata(proto.Message): + r"""A Dataproc workflow template resource. + + Attributes: + template (str): + Output only. The resource name of the workflow template as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates``, the resource + name of the template has the following format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates``, the + resource name of the template has the following format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + version (int): + Output only. The version of template at the + time of workflow instantiation. + create_cluster (~.workflow_templates.ClusterOperation): + Output only. The create cluster operation + metadata. + graph (~.workflow_templates.WorkflowGraph): + Output only. The workflow graph. + delete_cluster (~.workflow_templates.ClusterOperation): + Output only. The delete cluster operation + metadata. + state (~.workflow_templates.WorkflowMetadata.State): + Output only. The workflow state. + cluster_name (str): + Output only. The name of the target cluster. + parameters (Sequence[~.workflow_templates.WorkflowMetadata.ParametersEntry]): + Map from parameter names to values that were + used for those parameters. + start_time (~.timestamp.Timestamp): + Output only. Workflow start time. + end_time (~.timestamp.Timestamp): + Output only. Workflow end time. + cluster_uuid (str): + Output only. The UUID of target cluster. 
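To make the TemplateParameter field-path rules above concrete, a hypothetical parameter that substitutes the cluster-selector zone at instantiation time might look like the following; the parameter name, regex, and zone value are illustrative, and the client is assumed to follow the request-mapping style used elsewhere in this change:

# One entry for WorkflowTemplate.parameters: replaces the cluster-selector
# zone wherever the listed field path occurs.
zone_parameter = {
    "name": "ZONE",
    "fields": ["placement.clusterSelector.zone"],
    "description": "Zone label used to pick the target cluster.",
    "validation": {"regex": {"regexes": [r"us-central1-[a-f]"]}},
}

def instantiate_with_zone(workflow_client, template_name, zone):
    # Parameter values are plain strings, substituted before the workflow runs.
    operation = workflow_client.instantiate_workflow_template(
        request={"name": template_name, "parameters": {"ZONE": zone}}
    )
    return operation.result()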
+ """ + + class State(proto.Enum): + r"""The operation state.""" + UNKNOWN = 0 + PENDING = 1 + RUNNING = 2 + DONE = 3 + + template = proto.Field(proto.STRING, number=1) + + version = proto.Field(proto.INT32, number=2) + + create_cluster = proto.Field(proto.MESSAGE, number=3, message="ClusterOperation",) + + graph = proto.Field(proto.MESSAGE, number=4, message="WorkflowGraph",) + + delete_cluster = proto.Field(proto.MESSAGE, number=5, message="ClusterOperation",) + + state = proto.Field(proto.ENUM, number=6, enum=State,) + + cluster_name = proto.Field(proto.STRING, number=7) + + parameters = proto.MapField(proto.STRING, proto.STRING, number=8) + + start_time = proto.Field(proto.MESSAGE, number=9, message=timestamp.Timestamp,) + + end_time = proto.Field(proto.MESSAGE, number=10, message=timestamp.Timestamp,) + + cluster_uuid = proto.Field(proto.STRING, number=11) + + +class ClusterOperation(proto.Message): + r"""The cluster operation triggered by a workflow. + + Attributes: + operation_id (str): + Output only. The id of the cluster operation. + error (str): + Output only. Error, if operation failed. + done (bool): + Output only. Indicates the operation is done. + """ + + operation_id = proto.Field(proto.STRING, number=1) + + error = proto.Field(proto.STRING, number=2) + + done = proto.Field(proto.BOOL, number=3) + + +class WorkflowGraph(proto.Message): + r"""The workflow graph. + + Attributes: + nodes (Sequence[~.workflow_templates.WorkflowNode]): + Output only. The workflow nodes. + """ + + nodes = proto.RepeatedField(proto.MESSAGE, number=1, message="WorkflowNode",) + + +class WorkflowNode(proto.Message): + r"""The workflow node. + + Attributes: + step_id (str): + Output only. The name of the node. + prerequisite_step_ids (Sequence[str]): + Output only. Node's prerequisite nodes. + job_id (str): + Output only. The job id; populated after the + node enters RUNNING state. + state (~.workflow_templates.WorkflowNode.NodeState): + Output only. The node state. + error (str): + Output only. The error detail. + """ + + class NodeState(proto.Enum): + r"""The workflow node state.""" + NODE_STATUS_UNSPECIFIED = 0 + BLOCKED = 1 + RUNNABLE = 2 + RUNNING = 3 + COMPLETED = 4 + FAILED = 5 + + step_id = proto.Field(proto.STRING, number=1) + + prerequisite_step_ids = proto.RepeatedField(proto.STRING, number=2) + + job_id = proto.Field(proto.STRING, number=3) + + state = proto.Field(proto.ENUM, number=5, enum=NodeState,) + + error = proto.Field(proto.STRING, number=6) + + +class CreateWorkflowTemplateRequest(proto.Message): + r"""A request to create a workflow template. + + Attributes: + parent (str): + Required. The resource name of the region or location, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates,create``, the + resource name of the region has the following format: + ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.workflowTemplates.create``, the + resource name of the location has the following format: + ``projects/{project_id}/locations/{location}`` + template (~.workflow_templates.WorkflowTemplate): + Required. The Dataproc workflow template to + create. + """ + + parent = proto.Field(proto.STRING, number=1) + + template = proto.Field(proto.MESSAGE, number=2, message=WorkflowTemplate,) + + +class GetWorkflowTemplateRequest(proto.Message): + r"""A request to fetch a workflow template. + + Attributes: + name (str): + Required. 
The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates.get``, the + resource name of the template has the following format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates.get``, the + resource name of the template has the following format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + version (int): + Optional. The version of workflow template to + retrieve. Only previously instantiated versions + can be retrieved. + If unspecified, retrieves the current version. + """ + + name = proto.Field(proto.STRING, number=1) + + version = proto.Field(proto.INT32, number=2) + + +class InstantiateWorkflowTemplateRequest(proto.Message): + r"""A request to instantiate a workflow template. + + Attributes: + name (str): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + version (int): + Optional. The version of workflow template to + instantiate. If specified, the workflow will be + instantiated only if the current version of the + workflow template has the supplied version. + This option cannot be used to instantiate a + previous version of workflow template. + instance_id (str): + Deprecated. Please use ``request_id`` field instead. + request_id (str): + Optional. A tag that prevents multiple concurrent workflow + instances with the same tag from running. This mitigates + risk of concurrent instances started due to retries. + + It is recommended to always set this value to a + `UUID `__. + + The tag must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). The maximum length is 40 + characters. + parameters (Sequence[~.workflow_templates.InstantiateWorkflowTemplateRequest.ParametersEntry]): + Optional. Map from parameter names to values + that should be used for those parameters. Values + may not exceed 100 characters. + """ + + name = proto.Field(proto.STRING, number=1) + + version = proto.Field(proto.INT32, number=2) + + instance_id = proto.Field(proto.STRING, number=3) + + request_id = proto.Field(proto.STRING, number=5) + + parameters = proto.MapField(proto.STRING, proto.STRING, number=4) + + +class InstantiateInlineWorkflowTemplateRequest(proto.Message): + r"""A request to instantiate an inline workflow template. + + Attributes: + parent (str): + Required. The resource name of the region or location, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For + ``projects.regions.workflowTemplates,instantiateinline``, + the resource name of the region has the following format: + ``projects/{project_id}/regions/{region}`` + + - For + ``projects.locations.workflowTemplates.instantiateinline``, + the resource name of the location has the following + format: ``projects/{project_id}/locations/{location}`` + template (~.workflow_templates.WorkflowTemplate): + Required. The workflow template to + instantiate. + instance_id (str): + Deprecated. 
Please use ``request_id`` field instead. + request_id (str): + Optional. A tag that prevents multiple concurrent workflow + instances with the same tag from running. This mitigates + risk of concurrent instances started due to retries. + + It is recommended to always set this value to a + `UUID `__. + + The tag must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). The maximum length is 40 + characters. + """ + + parent = proto.Field(proto.STRING, number=1) + + template = proto.Field(proto.MESSAGE, number=2, message=WorkflowTemplate,) + + instance_id = proto.Field(proto.STRING, number=3) + + request_id = proto.Field(proto.STRING, number=4) + + +class UpdateWorkflowTemplateRequest(proto.Message): + r"""A request to update a workflow template. + + Attributes: + template (~.workflow_templates.WorkflowTemplate): + Required. The updated workflow template. + + The ``template.version`` field must match the current + version. + """ + + template = proto.Field(proto.MESSAGE, number=1, message=WorkflowTemplate,) + + +class ListWorkflowTemplatesRequest(proto.Message): + r"""A request to list workflow templates in a project. + + Attributes: + parent (str): + Required. The resource name of the region or location, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates,list``, the + resource name of the region has the following format: + ``projects/{project_id}/regions/{region}`` + + - For ``projects.locations.workflowTemplates.list``, the + resource name of the location has the following format: + ``projects/{project_id}/locations/{location}`` + page_size (int): + Optional. The maximum number of results to + return in each response. + page_token (str): + Optional. The page token, returned by a + previous call, to request the next page of + results. + """ + + parent = proto.Field(proto.STRING, number=1) + + page_size = proto.Field(proto.INT32, number=2) + + page_token = proto.Field(proto.STRING, number=3) + + +class ListWorkflowTemplatesResponse(proto.Message): + r"""A response to a request to list workflow templates in a + project. + + Attributes: + templates (Sequence[~.workflow_templates.WorkflowTemplate]): + Output only. WorkflowTemplates list. + next_page_token (str): + Output only. This token is included in the response if there + are more results to fetch. To fetch additional results, + provide this value as the page_token in a subsequent + ListWorkflowTemplatesRequest. + """ + + @property + def raw_page(self): + return self + + templates = proto.RepeatedField(proto.MESSAGE, number=1, message=WorkflowTemplate,) + + next_page_token = proto.Field(proto.STRING, number=2) + + +class DeleteWorkflowTemplateRequest(proto.Message): + r"""A request to delete a workflow template. + Currently started workflows will remain running. + + Attributes: + name (str): + Required. The resource name of the workflow template, as + described in + https://cloud.google.com/apis/design/resource_names. + + - For ``projects.regions.workflowTemplates.delete``, the + resource name of the template has the following format: + ``projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`` + + - For ``projects.locations.workflowTemplates.instantiate``, + the resource name of the template has the following + format: + ``projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`` + version (int): + Optional. The version of workflow template to + delete. 
If specified, will only delete the + template if the current server version matches + specified version. + """ + + name = proto.Field(proto.STRING, number=1) + + version = proto.Field(proto.INT32, number=2) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/mypy.ini b/mypy.ini new file mode 100644 index 00000000..4505b485 --- /dev/null +++ b/mypy.ini @@ -0,0 +1,3 @@ +[mypy] +python_version = 3.6 +namespace_packages = True diff --git a/noxfile.py b/noxfile.py index 016cd4a1..6374c11f 100644 --- a/noxfile.py +++ b/noxfile.py @@ -27,8 +27,8 @@ BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] DEFAULT_PYTHON_VERSION = "3.8" -SYSTEM_TEST_PYTHON_VERSIONS = ["2.7", "3.8"] -UNIT_TEST_PYTHON_VERSIONS = ["2.7", "3.5", "3.6", "3.7", "3.8"] +SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"] +UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8"] @nox.session(python=DEFAULT_PYTHON_VERSION) @@ -70,6 +70,8 @@ def lint_setup_py(session): def default(session): # Install all test dependencies, then install this package in-place. + session.install("asyncmock", "pytest-asyncio") + session.install("mock", "pytest", "pytest-cov") session.install("-e", ".") @@ -139,7 +141,7 @@ def cover(session): test runs (not system test runs), and then erases coverage data. """ session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=89") + session.run("coverage", "report", "--show-missing", "--fail-under=100") session.run("coverage", "erase") @@ -154,7 +156,7 @@ def docs(session): shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) session.run( "sphinx-build", - "-W", # warnings as errors + # "-W", # warnings as errors "-T", # show full traceback on exception "-N", # no colors "-b", diff --git a/samples/snippets/create_cluster.py b/samples/snippets/create_cluster.py index b4d63d2e..f5973083 100644 --- a/samples/snippets/create_cluster.py +++ b/samples/snippets/create_cluster.py @@ -38,38 +38,34 @@ def create_cluster(project_id, region, cluster_name): """ # Create a client with the endpoint set to the desired cluster region. - cluster_client = dataproc.ClusterControllerClient(client_options={ - 'api_endpoint': f'{region}-dataproc.googleapis.com:443', - }) + cluster_client = dataproc.ClusterControllerClient( + client_options={"api_endpoint": f"{region}-dataproc.googleapis.com:443",} + ) # Create the cluster config. cluster = { - 'project_id': project_id, - 'cluster_name': cluster_name, - 'config': { - 'master_config': { - 'num_instances': 1, - 'machine_type_uri': 'n1-standard-1' - }, - 'worker_config': { - 'num_instances': 2, - 'machine_type_uri': 'n1-standard-1' - } - } + "project_id": project_id, + "cluster_name": cluster_name, + "config": { + "master_config": {"num_instances": 1, "machine_type_uri": "n1-standard-1"}, + "worker_config": {"num_instances": 2, "machine_type_uri": "n1-standard-1"}, + }, } # Create the cluster. - operation = cluster_client.create_cluster(project_id, region, cluster) + operation = cluster_client.create_cluster( + request={"project_id": project_id, "region": region, "cluster": cluster} + ) result = operation.result() # Output a success message. 
- print(f'Cluster created successfully: {result.cluster_name}') + print(f"Cluster created successfully: {result.cluster_name}") # [END dataproc_create_cluster] if __name__ == "__main__": if len(sys.argv) < 4: - sys.exit('python create_cluster.py project_id region cluster_name') + sys.exit("python create_cluster.py project_id region cluster_name") project_id = sys.argv[1] region = sys.argv[2] diff --git a/samples/snippets/create_cluster_test.py b/samples/snippets/create_cluster_test.py index 6b1d6806..fd391c3e 100644 --- a/samples/snippets/create_cluster_test.py +++ b/samples/snippets/create_cluster_test.py @@ -21,20 +21,26 @@ import create_cluster -PROJECT_ID = os.environ['GOOGLE_CLOUD_PROJECT'] -REGION = 'us-central1' -CLUSTER_NAME = 'py-cc-test-{}'.format(str(uuid.uuid4())) +PROJECT_ID = os.environ["GOOGLE_CLOUD_PROJECT"] +REGION = "us-central1" +CLUSTER_NAME = "py-cc-test-{}".format(str(uuid.uuid4())) @pytest.fixture(autouse=True) def teardown(): yield - cluster_client = dataproc.ClusterControllerClient(client_options={ - 'api_endpoint': f'{REGION}-dataproc.googleapis.com:443' - }) + cluster_client = dataproc.ClusterControllerClient( + client_options={"api_endpoint": f"{REGION}-dataproc.googleapis.com:443"} + ) # Client library function - operation = cluster_client.delete_cluster(PROJECT_ID, REGION, CLUSTER_NAME) + operation = cluster_client.delete_cluster( + request={ + "project_id": PROJECT_ID, + "region": REGION, + "cluster_name": CLUSTER_NAME, + } + ) # Wait for cluster to delete operation.result() diff --git a/samples/snippets/dataproc_e2e_donttest.py b/samples/snippets/dataproc_e2e_donttest.py index 44cc03bf..a92f4606 100644 --- a/samples/snippets/dataproc_e2e_donttest.py +++ b/samples/snippets/dataproc_e2e_donttest.py @@ -20,13 +20,12 @@ import submit_job_to_cluster -PROJECT = os.environ['GOOGLE_CLOUD_PROJECT'] -BUCKET = os.environ['CLOUD_STORAGE_BUCKET'] -CLUSTER_NAME = 'testcluster3' -ZONE = 'us-central1-b' +PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] +BUCKET = os.environ["CLOUD_STORAGE_BUCKET"] +CLUSTER_NAME = "testcluster3" +ZONE = "us-central1-b" def test_e2e(): - output = submit_job_to_cluster.main( - PROJECT, ZONE, CLUSTER_NAME, BUCKET) + output = submit_job_to_cluster.main(PROJECT, ZONE, CLUSTER_NAME, BUCKET) assert b"['Hello,', 'dog', 'elephant', 'panther', 'world!']" in output diff --git a/samples/snippets/instantiate_inline_workflow_template.py b/samples/snippets/instantiate_inline_workflow_template.py index f9358376..b3a40d13 100644 --- a/samples/snippets/instantiate_inline_workflow_template.py +++ b/samples/snippets/instantiate_inline_workflow_template.py @@ -36,71 +36,61 @@ def instantiate_inline_workflow_template(project_id, region): # Create a client with the endpoint set to the desired region. 
workflow_template_client = dataproc.WorkflowTemplateServiceClient( - client_options={ - 'api_endpoint': f'{region}-dataproc.googleapis.com:443' - } + client_options={"api_endpoint": f"{region}-dataproc.googleapis.com:443"} ) - parent = workflow_template_client.region_path(project_id, region) + parent = "projects/{}/regions/{}".format(project_id, region) template = { - 'jobs': [ + "jobs": [ { - 'hadoop_job': { - 'main_jar_file_uri': 'file:///usr/lib/hadoop-mapreduce/' - 'hadoop-mapreduce-examples.jar', - 'args': [ - 'teragen', - '1000', - 'hdfs:///gen/' - ] + "hadoop_job": { + "main_jar_file_uri": "file:///usr/lib/hadoop-mapreduce/" + "hadoop-mapreduce-examples.jar", + "args": ["teragen", "1000", "hdfs:///gen/"], }, - 'step_id': 'teragen' + "step_id": "teragen", }, { - 'hadoop_job': { - 'main_jar_file_uri': 'file:///usr/lib/hadoop-mapreduce/' - 'hadoop-mapreduce-examples.jar', - 'args': [ - 'terasort', - 'hdfs:///gen/', - 'hdfs:///sort/' - ] + "hadoop_job": { + "main_jar_file_uri": "file:///usr/lib/hadoop-mapreduce/" + "hadoop-mapreduce-examples.jar", + "args": ["terasort", "hdfs:///gen/", "hdfs:///sort/"], }, - 'step_id': 'terasort', - 'prerequisite_step_ids': [ - 'teragen' - ] - }], - 'placement': { - 'managed_cluster': { - 'cluster_name': 'my-managed-cluster', - 'config': { - 'gce_cluster_config': { + "step_id": "terasort", + "prerequisite_step_ids": ["teragen"], + }, + ], + "placement": { + "managed_cluster": { + "cluster_name": "my-managed-cluster", + "config": { + "gce_cluster_config": { # Leave 'zone_uri' empty for 'Auto Zone Placement' # 'zone_uri': '' - 'zone_uri': 'us-central1-a' + "zone_uri": "us-central1-a" } - } + }, } - } + }, } # Submit the request to instantiate the workflow from an inline template. operation = workflow_template_client.instantiate_inline_workflow_template( - parent, template + request={"parent": parent, "template": template} ) operation.result() # Output a success message. 
- print('Workflow ran successfully.') + print("Workflow ran successfully.") # [END dataproc_instantiate_inline_workflow_template] if __name__ == "__main__": if len(sys.argv) < 3: - sys.exit('python instantiate_inline_workflow_template.py ' - + 'project_id region') + sys.exit( + "python instantiate_inline_workflow_template.py " + "project_id region" + ) project_id = sys.argv[1] region = sys.argv[2] diff --git a/samples/snippets/instantiate_inline_workflow_template_test.py b/samples/snippets/instantiate_inline_workflow_template_test.py index 22673e4e..ef4f31a5 100644 --- a/samples/snippets/instantiate_inline_workflow_template_test.py +++ b/samples/snippets/instantiate_inline_workflow_template_test.py @@ -17,8 +17,8 @@ import instantiate_inline_workflow_template -PROJECT_ID = os.environ['GOOGLE_CLOUD_PROJECT'] -REGION = 'us-central1' +PROJECT_ID = os.environ["GOOGLE_CLOUD_PROJECT"] +REGION = "us-central1" def test_workflows(capsys): diff --git a/samples/snippets/list_clusters.py b/samples/snippets/list_clusters.py index 1639c413..f0e7bac3 100644 --- a/samples/snippets/list_clusters.py +++ b/samples/snippets/list_clusters.py @@ -19,45 +19,50 @@ import argparse from google.cloud import dataproc_v1 -from google.cloud.dataproc_v1.gapic.transports import ( - cluster_controller_grpc_transport) +from google.cloud.dataproc_v1.gapic.transports import cluster_controller_grpc_transport # [START dataproc_list_clusters] def list_clusters(dataproc, project, region): """List the details of clusters in the region.""" - for cluster in dataproc.list_clusters(project, region): - print(('{} - {}'.format(cluster.cluster_name, - cluster.status.State.Name( - cluster.status.state)))) + for cluster in dataproc.list_clusters( + request={"project_id": project, "region": region} + ): + print( + ( + "{} - {}".format( + cluster.cluster_name, + cluster.status.State.Name(cluster.status.state), + ) + ) + ) + + # [END dataproc_list_clusters] def main(project_id, region): - if region == 'global': + if region == "global": # Use the default gRPC global endpoints. dataproc_cluster_client = dataproc_v1.ClusterControllerClient() else: # Use a regional gRPC endpoint. 
See: # https://cloud.google.com/dataproc/docs/concepts/regional-endpoints - client_transport = ( - cluster_controller_grpc_transport.ClusterControllerGrpcTransport( - address='{}-dataproc.googleapis.com:443'.format(region))) - dataproc_cluster_client = dataproc_v1.ClusterControllerClient( - client_transport) + client_transport = cluster_controller_grpc_transport.ClusterControllerGrpcTransport( + address="{}-dataproc.googleapis.com:443".format(region) + ) + dataproc_cluster_client = dataproc_v1.ClusterControllerClient(client_transport) list_clusters(dataproc_cluster_client, project_id, region) -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser( - description=__doc__, formatter_class=( - argparse.RawDescriptionHelpFormatter)) - parser.add_argument( - '--project_id', help='Project ID to access.', required=True) - parser.add_argument( - '--region', help='Region of clusters to list.', required=True) + description=__doc__, formatter_class=(argparse.RawDescriptionHelpFormatter) + ) + parser.add_argument("--project_id", help="Project ID to access.", required=True) + parser.add_argument("--region", help="Region of clusters to list.", required=True) args = parser.parse_args() main(args.project_id, args.region) diff --git a/samples/snippets/noxfile.py b/samples/snippets/noxfile.py index ba55d7ce..5660f08b 100644 --- a/samples/snippets/noxfile.py +++ b/samples/snippets/noxfile.py @@ -37,24 +37,22 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - 'ignored_versions': ["2.7"], - + "ignored_versions": ["2.7"], # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - 'envs': {}, + "envs": {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append('.') + sys.path.append(".") from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -69,12 +67,12 @@ def get_pytest_env_vars(): ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG['gcloud_project_env'] + env_key = TEST_CONFIG["gcloud_project_env"] # This should error out if not set. - ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG['envs']) + ret.update(TEST_CONFIG["envs"]) return ret @@ -83,7 +81,7 @@ def get_pytest_env_vars(): ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) @@ -138,7 +136,7 @@ def lint(session): args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - "." 
+ ".", ] session.run("flake8", *args) @@ -182,9 +180,9 @@ def py(session): if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip("SKIPPED: {} tests are disabled for this sample.".format( - session.python - )) + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) # diff --git a/samples/snippets/pyspark_sort.py b/samples/snippets/pyspark_sort.py index 0ce2350a..e43d57f1 100644 --- a/samples/snippets/pyspark_sort.py +++ b/samples/snippets/pyspark_sort.py @@ -22,7 +22,7 @@ import pyspark sc = pyspark.SparkContext() -rdd = sc.parallelize(['Hello,', 'world!', 'dog', 'elephant', 'panther']) +rdd = sc.parallelize(["Hello,", "world!", "dog", "elephant", "panther"]) words = sorted(rdd.collect()) print(words) # [END dataproc_pyspark_sort] diff --git a/samples/snippets/pyspark_sort_gcs.py b/samples/snippets/pyspark_sort_gcs.py index f1961c37..4b1e7b2d 100644 --- a/samples/snippets/pyspark_sort_gcs.py +++ b/samples/snippets/pyspark_sort_gcs.py @@ -25,6 +25,6 @@ import pyspark sc = pyspark.SparkContext() -rdd = sc.textFile('gs://path-to-your-GCS-file') +rdd = sc.textFile("gs://path-to-your-GCS-file") print(sorted(rdd.collect())) # [END dataproc_pyspark_sort_gcs] diff --git a/samples/snippets/quickstart/quickstart.py b/samples/snippets/quickstart/quickstart.py index 4159e281..362016c7 100644 --- a/samples/snippets/quickstart/quickstart.py +++ b/samples/snippets/quickstart/quickstart.py @@ -35,57 +35,51 @@ def quickstart(project_id, region, cluster_name, job_file_path): # Create the cluster client. - cluster_client = dataproc.ClusterControllerClient(client_options={ - 'api_endpoint': '{}-dataproc.googleapis.com:443'.format(region) - }) + cluster_client = dataproc.ClusterControllerClient( + client_options={"api_endpoint": "{}-dataproc.googleapis.com:443".format(region)} + ) # Create the cluster config. cluster = { - 'project_id': project_id, - 'cluster_name': cluster_name, - 'config': { - 'master_config': { - 'num_instances': 1, - 'machine_type_uri': 'n1-standard-1' - }, - 'worker_config': { - 'num_instances': 2, - 'machine_type_uri': 'n1-standard-1' - } - } + "project_id": project_id, + "cluster_name": cluster_name, + "config": { + "master_config": {"num_instances": 1, "machine_type_uri": "n1-standard-1"}, + "worker_config": {"num_instances": 2, "machine_type_uri": "n1-standard-1"}, + }, } # Create the cluster. - operation = cluster_client.create_cluster(project_id, region, cluster) + operation = cluster_client.create_cluster( + request={"project_id": project_id, "region": region, "cluster": cluster} + ) result = operation.result() - print('Cluster created successfully: {}'.format(result.cluster_name)) + print("Cluster created successfully: {}".format(result.cluster_name)) # Create the job client. - job_client = dataproc.JobControllerClient(client_options={ - 'api_endpoint': '{}-dataproc.googleapis.com:443'.format(region) - }) + job_client = dataproc.JobControllerClient( + client_options={"api_endpoint": "{}-dataproc.googleapis.com:443".format(region)} + ) # Create the job config. 
job = { - 'placement': { - 'cluster_name': cluster_name - }, - 'pyspark_job': { - 'main_python_file_uri': job_file_path - } + "placement": {"cluster_name": cluster_name}, + "pyspark_job": {"main_python_file_uri": job_file_path}, } - job_response = job_client.submit_job(project_id, region, job) + job_response = job_client.submit_job( + request={"project_id": project_id, "region": region, "job": job} + ) job_id = job_response.reference.job_id - print('Submitted job \"{}\".'.format(job_id)) + print('Submitted job "{}".'.format(job_id)) # Termimal states for a job. terminal_states = { - dataproc.types.JobStatus.ERROR, - dataproc.types.JobStatus.CANCELLED, - dataproc.types.JobStatus.DONE + dataproc.JobStatus.ERROR, + dataproc.JobStatus.CANCELLED, + dataproc.JobStatus.DONE, } # Create a timeout such that the job gets cancelled if not in a @@ -96,64 +90,85 @@ def quickstart(project_id, region, cluster_name, job_file_path): # Wait for the job to complete. while job_response.status.state not in terminal_states: if time.time() > time_start + timeout_seconds: - job_client.cancel_job(project_id, region, job_id) - print('Job {} timed out after threshold of {} seconds.'.format( - job_id, timeout_seconds)) + job_client.cancel_job( + request={"project_id": project_id, "region": region, "job_id": job_id} + ) + print( + "Job {} timed out after threshold of {} seconds.".format( + job_id, timeout_seconds + ) + ) # Poll for job termination once a second. time.sleep(1) - job_response = job_client.get_job(project_id, region, job_id) + job_response = job_client.get_job( + request={"project_id": project_id, "region": region, "job_id": job_id} + ) # Cloud Dataproc job output gets saved to a GCS bucket allocated to it. cluster_info = cluster_client.get_cluster( - project_id, region, cluster_name) + request={ + "project_id": project_id, + "region": region, + "cluster_name": cluster_name, + } + ) storage_client = storage.Client() bucket = storage_client.get_bucket(cluster_info.config.config_bucket) - output_blob = ( - 'google-cloud-dataproc-metainfo/{}/jobs/{}/driveroutput.000000000' - .format(cluster_info.cluster_uuid, job_id)) + output_blob = "google-cloud-dataproc-metainfo/{}/jobs/{}/driveroutput.000000000".format( + cluster_info.cluster_uuid, job_id + ) output = bucket.blob(output_blob).download_as_string() - print('Job {} finished with state {}:\n{}'.format( - job_id, - job_response.status.State.Name(job_response.status.state), - output)) + print( + "Job {} finished with state {}:\n{}".format( + job_id, job_response.status.State.Name(job_response.status.state), output + ) + ) # Delete the cluster once the job has terminated. 
- operation = cluster_client.delete_cluster(project_id, region, cluster_name) + operation = cluster_client.delete_cluster( + request={ + "project_id": project_id, + "region": region, + "cluster_name": cluster_name, + } + ) operation.result() - print('Cluster {} successfully deleted.'.format(cluster_name)) + print("Cluster {} successfully deleted.".format(cluster_name)) if __name__ == "__main__": parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter, + description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( - '--project_id', + "--project_id", type=str, required=True, - help='Project to use for creating resources.') + help="Project to use for creating resources.", + ) parser.add_argument( - '--region', + "--region", type=str, required=True, - help='Region where the resources should live.') + help="Region where the resources should live.", + ) parser.add_argument( - '--cluster_name', + "--cluster_name", type=str, required=True, - help='Name to use for creating a cluster.') + help="Name to use for creating a cluster.", + ) parser.add_argument( - '--job_file_path', + "--job_file_path", type=str, required=True, - help='Job in GCS to execute against the cluster.') + help="Job in GCS to execute against the cluster.", + ) args = parser.parse_args() - quickstart(args.project_id, args.region, - args.cluster_name, args.job_file_path) + quickstart(args.project_id, args.region, args.cluster_name, args.job_file_path) # [END dataproc_quickstart] diff --git a/samples/snippets/quickstart/quickstart_test.py b/samples/snippets/quickstart/quickstart_test.py index 3e17f6fa..9ea46cd1 100644 --- a/samples/snippets/quickstart/quickstart_test.py +++ b/samples/snippets/quickstart/quickstart_test.py @@ -22,12 +22,12 @@ import quickstart -PROJECT_ID = os.environ['GOOGLE_CLOUD_PROJECT'] -REGION = 'us-central1' -CLUSTER_NAME = 'py-qs-test-{}'.format(str(uuid.uuid4())) -STAGING_BUCKET = 'py-dataproc-qs-bucket-{}'.format(str(uuid.uuid4())) -JOB_FILE_NAME = 'sum.py' -JOB_FILE_PATH = 'gs://{}/{}'.format(STAGING_BUCKET, JOB_FILE_NAME) +PROJECT_ID = os.environ["GOOGLE_CLOUD_PROJECT"] +REGION = "us-central1" +CLUSTER_NAME = "py-qs-test-{}".format(str(uuid.uuid4())) +STAGING_BUCKET = "py-dataproc-qs-bucket-{}".format(str(uuid.uuid4())) +JOB_FILE_NAME = "sum.py" +JOB_FILE_PATH = "gs://{}/{}".format(STAGING_BUCKET, JOB_FILE_NAME) SORT_CODE = ( "import pyspark\n" "sc = pyspark.SparkContext()\n" @@ -45,17 +45,25 @@ def setup_teardown(): yield - cluster_client = dataproc.ClusterControllerClient(client_options={ - 'api_endpoint': '{}-dataproc.googleapis.com:443'.format(REGION) - }) + cluster_client = dataproc.ClusterControllerClient( + client_options={"api_endpoint": "{}-dataproc.googleapis.com:443".format(REGION)} + ) # The quickstart sample deletes the cluster, but if the test fails # before cluster deletion occurs, it can be manually deleted here. 
- clusters = cluster_client.list_clusters(PROJECT_ID, REGION) + clusters = cluster_client.list_clusters( + request={"project_id": PROJECT_ID, "region": REGION} + ) for cluster in clusters: if cluster.cluster_name == CLUSTER_NAME: - cluster_client.delete_cluster(PROJECT_ID, REGION, CLUSTER_NAME) + cluster_client.delete_cluster( + request={ + "project_id": PROJECT_ID, + "region": REGION, + "cluster_name": CLUSTER_NAME, + } + ) blob.delete() bucket.delete() @@ -65,7 +73,7 @@ def test_quickstart(capsys): quickstart.quickstart(PROJECT_ID, REGION, CLUSTER_NAME, JOB_FILE_PATH) out, _ = capsys.readouterr() - assert 'Cluster created successfully' in out - assert 'Submitted job' in out - assert 'finished with state DONE:' in out - assert 'successfully deleted' in out + assert "Cluster created successfully" in out + assert "Submitted job" in out + assert "finished with state DONE:" in out + assert "successfully deleted" in out diff --git a/samples/snippets/single_job_workflow.py b/samples/snippets/single_job_workflow.py index b2754b06..04f73ea9 100644 --- a/samples/snippets/single_job_workflow.py +++ b/samples/snippets/single_job_workflow.py @@ -30,7 +30,8 @@ from google.cloud import dataproc_v1 from google.cloud import storage from google.cloud.dataproc_v1.gapic.transports import ( - workflow_template_service_grpc_transport) + workflow_template_service_grpc_transport, +) DEFAULT_FILENAME = "pyspark_sort.py" @@ -66,12 +67,12 @@ def upload_pyspark_file(project, bucket_name, filename, spark_file): blob.upload_from_file(spark_file) -def run_workflow(dataproc, project, region, zone, bucket_name, filename, - cluster_name): +def run_workflow(dataproc, project, region, zone, bucket_name, filename, cluster_name): parent = "projects/{}/regions/{}".format(project, region) - zone_uri = ("https://www.googleapis.com/compute/v1/projects/{}/zones/{}" - .format(project, zone)) + zone_uri = "https://www.googleapis.com/compute/v1/projects/{}/zones/{}".format( + project, zone + ) workflow_data = { "placement": { @@ -93,16 +94,16 @@ def run_workflow(dataproc, project, region, zone, bucket_name, filename, "jobs": [ { "pyspark_job": { - "main_python_file_uri": "gs://{}/{}".format( - bucket_name, filename) + "main_python_file_uri": "gs://{}/{}".format(bucket_name, filename) }, "step_id": "pyspark-job", } ], } - workflow = dataproc.instantiate_inline_workflow_template(parent, - workflow_data) + workflow = dataproc.instantiate_inline_workflow_template( + request={"parent": parent, "template": workflow_data} + ) workflow.add_done_callback(callback) global waiting_callback @@ -118,8 +119,10 @@ def callback(operation_future): def wait_for_workflow_end(): """Wait for cluster creation.""" print("Waiting for workflow completion ...") - print("Workflow and job progress, and job driver output available from: " - "https://console.cloud.google.com/dataproc/workflows/") + print( + "Workflow and job progress, and job driver output available from: " + "https://console.cloud.google.com/dataproc/workflows/" + ) while True: if not waiting_callback: @@ -146,10 +149,9 @@ def main( region = get_region_from_zone(zone) # Use a regional gRPC endpoint. 
See: # https://cloud.google.com/dataproc/docs/concepts/regional-endpoints - client_transport = (workflow_template_service_grpc_transport - .WorkflowTemplateServiceGrpcTransport( - address="{}-dataproc.googleapis.com:443" - .format(region))) + client_transport = workflow_template_service_grpc_transport.WorkflowTemplateServiceGrpcTransport( + address="{}-dataproc.googleapis.com:443".format(region) + ) dataproc_workflow_client = dataproc_v1.WorkflowTemplateServiceClient( client_transport ) @@ -157,8 +159,7 @@ def main( try: spark_file, spark_filename = get_pyspark_file(pyspark_file) - upload_pyspark_file(project_id, bucket_name, spark_filename, - spark_file) + upload_pyspark_file(project_id, bucket_name, spark_filename, spark_file) run_workflow( dataproc_workflow_client, @@ -167,7 +168,7 @@ def main( zone, bucket_name, spark_filename, - cluster_name + cluster_name, ) wait_for_workflow_end() @@ -177,8 +178,8 @@ def main( if __name__ == "__main__": parser = argparse.ArgumentParser( - description=__doc__, formatter_class=(argparse - .RawDescriptionHelpFormatter)) + description=__doc__, formatter_class=(argparse.RawDescriptionHelpFormatter) + ) parser.add_argument( "--project_id", help="Project ID you want to access.", required=True ) @@ -186,8 +187,7 @@ def main( "--zone", help="Zone to create clusters in/connect to", required=True ) parser.add_argument( - "--cluster_name", help="Name of the cluster to create/connect to", - required=True + "--cluster_name", help="Name of the cluster to create/connect to", required=True ) parser.add_argument( "--gcs_bucket", help="Bucket to upload Pyspark file to", required=True @@ -195,9 +195,11 @@ def main( parser.add_argument( "--pyspark_file", help="Pyspark filename. Defaults to pyspark_sort.py" ) - parser.add_argument("--global_region", - action="store_true", - help="If cluster is in the global region") + parser.add_argument( + "--global_region", + action="store_true", + help="If cluster is in the global region", + ) args = parser.parse_args() main( diff --git a/samples/snippets/submit_job_to_cluster.py b/samples/snippets/submit_job_to_cluster.py index 389cbec8..d613cf5b 100644 --- a/samples/snippets/submit_job_to_cluster.py +++ b/samples/snippets/submit_job_to_cluster.py @@ -34,13 +34,11 @@ from google.cloud import dataproc_v1 from google.cloud import storage -from google.cloud.dataproc_v1.gapic.transports import ( - cluster_controller_grpc_transport) -from google.cloud.dataproc_v1.gapic.transports import ( - job_controller_grpc_transport) +from google.cloud.dataproc_v1.gapic.transports import cluster_controller_grpc_transport +from google.cloud.dataproc_v1.gapic.transports import job_controller_grpc_transport -DEFAULT_FILENAME = 'pyspark_sort.py' +DEFAULT_FILENAME = "pyspark_sort.py" waiting_callback = False @@ -57,16 +55,16 @@ def get_pyspark_file(pyspark_file=None): def get_region_from_zone(zone): try: - region_as_list = zone.split('-')[:-1] - return '-'.join(region_as_list) + region_as_list = zone.split("-")[:-1] + return "-".join(region_as_list) except (AttributeError, IndexError, ValueError): - raise ValueError('Invalid zone provided, please check your input.') + raise ValueError("Invalid zone provided, please check your input.") def upload_pyspark_file(project, bucket_name, filename, spark_file): """Uploads the PySpark file in this directory to the configured input bucket.""" - print('Uploading pyspark file to Cloud Storage.') + print("Uploading pyspark file to Cloud Storage.") client = storage.Client(project=project) bucket = 
client.get_bucket(bucket_name) blob = bucket.blob(filename) @@ -76,44 +74,40 @@ def upload_pyspark_file(project, bucket_name, filename, spark_file): def download_output(project, cluster_id, output_bucket, job_id): """Downloads the output file from Cloud Storage and returns it as a string.""" - print('Downloading output file.') + print("Downloading output file.") client = storage.Client(project=project) bucket = client.get_bucket(output_bucket) - output_blob = ( - ('google-cloud-dataproc-metainfo/{}/jobs/{}/driveroutput.000000000'. - format(cluster_id, job_id))) + output_blob = "google-cloud-dataproc-metainfo/{}/jobs/{}/driveroutput.000000000".format( + cluster_id, job_id + ) return bucket.blob(output_blob).download_as_string() # [START dataproc_create_cluster] def create_cluster(dataproc, project, zone, region, cluster_name): """Create the cluster.""" - print('Creating cluster...') - zone_uri = \ - 'https://www.googleapis.com/compute/v1/projects/{}/zones/{}'.format( - project, zone) + print("Creating cluster...") + zone_uri = "https://www.googleapis.com/compute/v1/projects/{}/zones/{}".format( + project, zone + ) cluster_data = { - 'project_id': project, - 'cluster_name': cluster_name, - 'config': { - 'gce_cluster_config': { - 'zone_uri': zone_uri - }, - 'master_config': { - 'num_instances': 1, - 'machine_type_uri': 'n1-standard-1' - }, - 'worker_config': { - 'num_instances': 2, - 'machine_type_uri': 'n1-standard-1' - } - } + "project_id": project, + "cluster_name": cluster_name, + "config": { + "gce_cluster_config": {"zone_uri": zone_uri}, + "master_config": {"num_instances": 1, "machine_type_uri": "n1-standard-1"}, + "worker_config": {"num_instances": 2, "machine_type_uri": "n1-standard-1"}, + }, } - cluster = dataproc.create_cluster(project, region, cluster_data) + cluster = dataproc.create_cluster( + request={"project_id": project, "region": region, "cluster": cluster_data} + ) cluster.add_done_callback(callback) global waiting_callback waiting_callback = True + + # [END dataproc_create_cluster] @@ -125,7 +119,7 @@ def callback(operation_future): def wait_for_cluster_creation(): """Wait for cluster creation.""" - print('Waiting for cluster creation...') + print("Waiting for cluster creation...") while True: if not waiting_callback: @@ -136,79 +130,99 @@ def wait_for_cluster_creation(): # [START dataproc_list_clusters_with_detail] def list_clusters_with_details(dataproc, project, region): """List the details of clusters in the region.""" - for cluster in dataproc.list_clusters(project, region): - print(('{} - {}'.format(cluster.cluster_name, - cluster.status.State.Name( - cluster.status.state)))) + for cluster in dataproc.list_clusters( + request={"project_id": project, "region": region} + ): + print( + ( + "{} - {}".format( + cluster.cluster_name, + cluster.status.State.Name(cluster.status.state), + ) + ) + ) + + # [END dataproc_list_clusters_with_detail] def get_cluster_id_by_name(dataproc, project_id, region, cluster_name): """Helper function to retrieve the ID and output bucket of a cluster by name.""" - for cluster in dataproc.list_clusters(project_id, region): + for cluster in dataproc.list_clusters( + request={"project_id": project_id, "region": region} + ): if cluster.cluster_name == cluster_name: return cluster.cluster_uuid, cluster.config.config_bucket # [START dataproc_submit_pyspark_job] -def submit_pyspark_job(dataproc, project, region, cluster_name, bucket_name, - filename): +def submit_pyspark_job(dataproc, project, region, cluster_name, bucket_name, filename): """Submit 
the Pyspark job to the cluster (assumes `filename` was uploaded to `bucket_name.""" job_details = { - 'placement': { - 'cluster_name': cluster_name + "placement": {"cluster_name": cluster_name}, + "pyspark_job": { + "main_python_file_uri": "gs://{}/{}".format(bucket_name, filename) }, - 'pyspark_job': { - 'main_python_file_uri': 'gs://{}/{}'.format(bucket_name, filename) - } } result = dataproc.submit_job( - project_id=project, region=region, job=job_details) + request={"project_id": project, "region": region, "job": job_details} + ) job_id = result.reference.job_id - print('Submitted job ID {}.'.format(job_id)) + print("Submitted job ID {}.".format(job_id)) return job_id + + # [END dataproc_submit_pyspark_job] # [START dataproc_delete] def delete_cluster(dataproc, project, region, cluster): """Delete the cluster.""" - print('Tearing down cluster.') + print("Tearing down cluster.") result = dataproc.delete_cluster( - project_id=project, region=region, cluster_name=cluster) + request={"project_id": project, "region": region, "cluster_name": cluster} + ) return result + + # [END dataproc_delete] # [START dataproc_wait] def wait_for_job(dataproc, project, region, job_id): """Wait for job to complete or error out.""" - print('Waiting for job to finish...') + print("Waiting for job to finish...") while True: - job = dataproc.get_job(project, region, job_id) + job = dataproc.get_job( + request={"project_id": project, "region": region, "job_id": job_id} + ) # Handle exceptions - if job.status.State.Name(job.status.state) == 'ERROR': + if job.status.State.Name(job.status.state) == "ERROR": raise Exception(job.status.details) - elif job.status.State.Name(job.status.state) == 'DONE': - print('Job finished.') + elif job.status.State.Name(job.status.state) == "DONE": + print("Job finished.") return job + + # [END dataproc_wait] -def main(project_id, - zone, - cluster_name, - bucket_name, - pyspark_file=None, - create_new_cluster=True, - global_region=True): +def main( + project_id, + zone, + cluster_name, + bucket_name, + pyspark_file=None, + create_new_cluster=True, + global_region=True, +): # [START dataproc_get_client] if global_region: - region = 'global' + region = "global" # Use the default gRPC global endpoints. dataproc_cluster_client = dataproc_v1.ClusterControllerClient() dataproc_job_client = dataproc_v1.JobControllerClient() @@ -216,73 +230,89 @@ def main(project_id, region = get_region_from_zone(zone) # Use a regional gRPC endpoint. 
See: # https://cloud.google.com/dataproc/docs/concepts/regional-endpoints - client_transport = ( - cluster_controller_grpc_transport.ClusterControllerGrpcTransport( - address='{}-dataproc.googleapis.com:443'.format(region))) - job_transport = ( - job_controller_grpc_transport.JobControllerGrpcTransport( - address='{}-dataproc.googleapis.com:443'.format(region))) - dataproc_cluster_client = dataproc_v1.ClusterControllerClient( - client_transport) + client_transport = cluster_controller_grpc_transport.ClusterControllerGrpcTransport( + address="{}-dataproc.googleapis.com:443".format(region) + ) + job_transport = job_controller_grpc_transport.JobControllerGrpcTransport( + address="{}-dataproc.googleapis.com:443".format(region) + ) + dataproc_cluster_client = dataproc_v1.ClusterControllerClient(client_transport) dataproc_job_client = dataproc_v1.JobControllerClient(job_transport) # [END dataproc_get_client] try: spark_file, spark_filename = get_pyspark_file(pyspark_file) if create_new_cluster: - create_cluster(dataproc_cluster_client, project_id, zone, region, - cluster_name) + create_cluster( + dataproc_cluster_client, project_id, zone, region, cluster_name + ) wait_for_cluster_creation() - upload_pyspark_file(project_id, bucket_name, spark_filename, - spark_file) + upload_pyspark_file(project_id, bucket_name, spark_filename, spark_file) - list_clusters_with_details(dataproc_cluster_client, project_id, - region) + list_clusters_with_details(dataproc_cluster_client, project_id, region) - (cluster_id, output_bucket) = ( - get_cluster_id_by_name(dataproc_cluster_client, project_id, - region, cluster_name)) + (cluster_id, output_bucket) = get_cluster_id_by_name( + dataproc_cluster_client, project_id, region, cluster_name + ) # [START dataproc_call_submit_pyspark_job] - job_id = submit_pyspark_job(dataproc_job_client, project_id, region, - cluster_name, bucket_name, spark_filename) + job_id = submit_pyspark_job( + dataproc_job_client, + project_id, + region, + cluster_name, + bucket_name, + spark_filename, + ) # [END dataproc_call_submit_pyspark_job] wait_for_job(dataproc_job_client, project_id, region, job_id) output = download_output(project_id, cluster_id, output_bucket, job_id) - print('Received job output {}'.format(output)) + print("Received job output {}".format(output)) return output finally: if create_new_cluster: - delete_cluster(dataproc_cluster_client, project_id, region, - cluster_name) + delete_cluster(dataproc_cluster_client, project_id, region, cluster_name) spark_file.close() -if __name__ == '__main__': - parser = argparse.ArgumentParser(description=__doc__, - formatter_class=argparse. - RawDescriptionHelpFormatter) +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter + ) + parser.add_argument( + "--project_id", help="Project ID you want to access.", required=True + ) + parser.add_argument( + "--zone", help="Zone to create clusters in/connect to", required=True + ) + parser.add_argument( + "--cluster_name", help="Name of the cluster to create/connect to", required=True + ) + parser.add_argument( + "--gcs_bucket", help="Bucket to upload Pyspark file to", required=True + ) + parser.add_argument( + "--pyspark_file", help="Pyspark filename. 
Defaults to pyspark_sort.py" + ) + parser.add_argument( + "--create_new_cluster", + action="store_true", + help="States if the cluster should be created", + ) parser.add_argument( - '--project_id', help='Project ID you want to access.', required=True) - parser.add_argument('--zone', - help='Zone to create clusters in/connect to', - required=True) - parser.add_argument('--cluster_name', - help='Name of the cluster to create/connect to', - required=True) - parser.add_argument('--gcs_bucket', - help='Bucket to upload Pyspark file to', - required=True) - parser.add_argument('--pyspark_file', - help='Pyspark filename. Defaults to pyspark_sort.py') - parser.add_argument('--create_new_cluster', - action='store_true', - help='States if the cluster should be created') - parser.add_argument('--global_region', - action='store_true', - help='If cluster is in the global region') + "--global_region", + action="store_true", + help="If cluster is in the global region", + ) args = parser.parse_args() - main(args.project_id, args.zone, args.cluster_name, args.gcs_bucket, - args.pyspark_file, args.create_new_cluster, args.global_region) + main( + args.project_id, + args.zone, + args.cluster_name, + args.gcs_bucket, + args.pyspark_file, + args.create_new_cluster, + args.global_region, + ) diff --git a/scripts/fixup_dataproc_v1_keywords.py b/scripts/fixup_dataproc_v1_keywords.py new file mode 100644 index 00000000..9824550a --- /dev/null +++ b/scripts/fixup_dataproc_v1_keywords.py @@ -0,0 +1,202 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class dataprocCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'cancel_job': ('project_id', 'region', 'job_id', ), + 'create_autoscaling_policy': ('parent', 'policy', ), + 'create_cluster': ('project_id', 'region', 'cluster', 'request_id', ), + 'create_workflow_template': ('parent', 'template', ), + 'delete_autoscaling_policy': ('name', ), + 'delete_cluster': ('project_id', 'region', 'cluster_name', 'cluster_uuid', 'request_id', ), + 'delete_job': ('project_id', 'region', 'job_id', ), + 'delete_workflow_template': ('name', 'version', ), + 'diagnose_cluster': ('project_id', 'region', 'cluster_name', ), + 'get_autoscaling_policy': ('name', ), + 'get_cluster': ('project_id', 'region', 'cluster_name', ), + 'get_job': ('project_id', 'region', 'job_id', ), + 'get_workflow_template': ('name', 'version', ), + 'instantiate_inline_workflow_template': ('parent', 'template', 'request_id', ), + 'instantiate_workflow_template': ('name', 'version', 'request_id', 'parameters', ), + 'list_autoscaling_policies': ('parent', 'page_size', 'page_token', ), + 'list_clusters': ('project_id', 'region', 'filter', 'page_size', 'page_token', ), + 'list_jobs': ('project_id', 'region', 'page_size', 'page_token', 'cluster_name', 'job_state_matcher', 'filter', ), + 'list_workflow_templates': ('parent', 'page_size', 'page_token', ), + 'submit_job': ('project_id', 'region', 'job', 'request_id', ), + 'submit_job_as_operation': ('project_id', 'region', 'job', 'request_id', ), + 'update_autoscaling_policy': ('policy', ), + 'update_cluster': ('project_id', 'region', 'cluster_name', 'cluster', 'update_mask', 'graceful_decommission_timeout', 'request_id', ), + 'update_job': ('project_id', 'region', 'job_id', 'job', 'update_mask', ), + 'update_workflow_template': ('template', ), + + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. 
+ return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), + cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=dataprocCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the dataproc client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. 
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/scripts/fixup_dataproc_v1beta2_keywords.py b/scripts/fixup_dataproc_v1beta2_keywords.py new file mode 100644 index 00000000..ecadef2b --- /dev/null +++ b/scripts/fixup_dataproc_v1beta2_keywords.py @@ -0,0 +1,202 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class dataprocCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'cancel_job': ('project_id', 'region', 'job_id', ), + 'create_autoscaling_policy': ('parent', 'policy', ), + 'create_cluster': ('project_id', 'region', 'cluster', 'request_id', ), + 'create_workflow_template': ('parent', 'template', ), + 'delete_autoscaling_policy': ('name', ), + 'delete_cluster': ('project_id', 'region', 'cluster_name', 'cluster_uuid', 'request_id', ), + 'delete_job': ('project_id', 'region', 'job_id', ), + 'delete_workflow_template': ('name', 'version', ), + 'diagnose_cluster': ('project_id', 'region', 'cluster_name', ), + 'get_autoscaling_policy': ('name', ), + 'get_cluster': ('project_id', 'region', 'cluster_name', ), + 'get_job': ('project_id', 'region', 'job_id', ), + 'get_workflow_template': ('name', 'version', ), + 'instantiate_inline_workflow_template': ('parent', 'template', 'instance_id', 'request_id', ), + 'instantiate_workflow_template': ('name', 'version', 'instance_id', 'request_id', 'parameters', ), + 'list_autoscaling_policies': ('parent', 'page_size', 'page_token', ), + 'list_clusters': ('project_id', 'region', 'filter', 'page_size', 'page_token', ), + 'list_jobs': ('project_id', 'region', 'page_size', 'page_token', 
'cluster_name', 'job_state_matcher', 'filter', ), + 'list_workflow_templates': ('parent', 'page_size', 'page_token', ), + 'submit_job': ('project_id', 'region', 'job', 'request_id', ), + 'submit_job_as_operation': ('project_id', 'region', 'job', 'request_id', ), + 'update_autoscaling_policy': ('policy', ), + 'update_cluster': ('project_id', 'region', 'cluster_name', 'cluster', 'update_mask', 'graceful_decommission_timeout', 'request_id', ), + 'update_job': ('project_id', 'region', 'job_id', 'job', 'update_mask', ), + 'update_workflow_template': ('template', ), + + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: not a.keyword.value in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), + cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=dataprocCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the dataproc client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. 
the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. +""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/setup.py b/setup.py index 88db49f4..33cbcec8 100644 --- a/setup.py +++ b/setup.py @@ -28,7 +28,11 @@ # 'Development Status :: 4 - Beta' # 'Development Status :: 5 - Production/Stable' release_status = "Development Status :: 5 - Production/Stable" -dependencies = ["google-api-core[grpc] >= 1.14.0, < 2.0.0dev"] +dependencies = [ + "google-api-core[grpc] >= 1.22.0, < 2.0.0dev", + "libcst >= 0.2.5", + "proto-plus >= 1.4.0", +] extras = {} @@ -43,7 +47,9 @@ # Only include packages under the 'google' namespace. Do not include tests, # benchmarks, etc. packages = [ - package for package in setuptools.find_packages() if package.startswith("google") + package + for package in setuptools.PEP420PackageFinder.find() + if package.startswith("google") ] # Determine which namespaces are needed. 
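
The two fixup scripts above (registered below via the scripts entry added to setup.py) rewrite call sites from the old flattened positional signature to the new request-dict form. As a minimal sketch of the conversion they are meant to produce, using the get_cluster entry from METHOD_TO_PARAMS; the client construction and the literal project/region/cluster values are placeholders for illustration, not part of this change:

    from google.cloud import dataproc_v1

    # Assumed client setup; credentials and endpoint resolution follow the defaults.
    client = dataproc_v1.ClusterControllerClient()

    # Before (old GAPIC surface, positional arguments):
    #   cluster = client.get_cluster("example-project", "us-central1", "example-cluster")

    # After (microgen surface, a single keyword-only request dict):
    cluster = client.get_cluster(
        request={
            "project_id": "example-project",
            "region": "us-central1",
            "cluster_name": "example-cluster",
        }
    )

Retry, timeout, and metadata arguments are not folded into the request dict; they stay as keyword arguments, matching CTRL_PARAMS in the transformer.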
@@ -66,12 +72,10 @@ "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python", - "Programming Language :: Python :: 2", - "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", "Operating System :: OS Independent", "Topic :: Internet", ], @@ -80,7 +84,11 @@ namespace_packages=namespaces, install_requires=dependencies, extras_require=extras, - python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*", + python_requires=">=3.6", + scripts=[ + "scripts/fixup_dataproc_v1_keywords.py", + "scripts/fixup_dataproc_v1beta2_keywords.py", + ], include_package_data=True, zip_safe=False, ) diff --git a/synth.metadata b/synth.metadata index d2f13b19..334a4418 100644 --- a/synth.metadata +++ b/synth.metadata @@ -9,10 +9,9 @@ }, { "git": { - "name": "googleapis", - "remote": "https://github.com/googleapis/googleapis.git", - "sha": "6fd07563a2f1a6785066f5955ad9659a315e4492", - "internalRef": "324941614" + "name": "synthtool", + "remote": "https://github.com/googleapis/synthtool.git", + "sha": "5f2f711c91199ba2f609d3f06a2fe22aee4e5be3" } }, { diff --git a/synth.py b/synth.py index 1680ecb6..62644eae 100644 --- a/synth.py +++ b/synth.py @@ -36,64 +36,14 @@ ) s.move(library, excludes=["docs/index.rst", "nox.py", "README.rst", "setup.py"]) - s.replace( - f"google/cloud/dataproc_{version}/gapic/cluster_controller_client.py", - "metadata_type=operations_pb2.ClusterOperationMetadata,", - "metadata_type=proto_operations_pb2.ClusterOperationMetadata,", - ) - - s.replace( - f"google/cloud/dataproc_{version}/gapic/cluster_controller_client.py", - "\s+Note:.*\n(.*\n)+?.*types.FieldMask.", - f""" - - - .. note:: - - Currently, only the following fields can be updated: - - * ``labels``: Update labels - * ``config.worker_config.num_instances``: Resize primary - worker group - * ``config.secondary_worker_config.num_instances``: Resize - secondary worker group - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.dataproc_{version}.types.FieldMask`""", - ) - - s.replace( - f'google/cloud/dataproc_{version}/proto/workflow_templates_pb2.py', - ', and must\n\s+conform to the following PCRE regular expression:' - '(.*\n)+?.*No more than 32', - '. Label values must be between\n' - ' 1 and 63 characters long. No more than 32' - ) - s.replace( - f'google/cloud/dataproc_{version}/proto/workflow_templates_pb2.py', - ', and must conform to\n' - '\s+the following regular expression:(.*\n)+?.* No more than', - '. Label values must be between\n' - ' 1 and 63 characters long. No more than' - ) - -s.replace( - "google/cloud/dataproc_v1beta2/proto/clusters_pb2.py", - "# Generated by the protocol buffer compiler. DO NOT EDIT!", - "# -*- coding: utf-8 -*-\n" - "# Generated by the protocol buffer compiler. 
DO NOT EDIT!", -) - -s.replace( - "google/cloud/**/jobs_pb2.py", - "/etc/hadoop/conf/\*-site", - "``/etc/hadoop/conf/*-site``" -) # ---------------------------------------------------------------------------- # Add templated files # ---------------------------------------------------------------------------- -templated_files = common.py_library(unit_cov_level=97, cov_level=89, samples=True) -s.move(templated_files) +templated_files = common.py_library( + samples=True, # set to True only if there are samples + microgenerator=True, +) +s.move(templated_files, excludes=[".coveragerc"]) # microgenerator has a good .coveragerc file # ---------------------------------------------------------------------------- # Samples templates @@ -103,4 +53,8 @@ # TODO(busunkim): Use latest sphinx after microgenerator transition s.replace("noxfile.py", """['"]sphinx['"]""", '"sphinx<3.0.0"') -s.shell.run(["nox", "-s", "blacken"], hide_output=False) +# Temporarily disable warnings due to +# https://github.com/googleapis/gapic-generator-python/issues/525 +s.replace("noxfile.py", '[\"\']-W[\"\']', '# "-W"') + +s.shell.run(["nox", "-s", "blacken"], hide_output=False) \ No newline at end of file diff --git a/tests/system/gapic/v1/test_system_cluster_controller_v1.py b/tests/system/gapic/v1/test_system_cluster_controller_v1.py index a595af5b..604a2f1f 100644 --- a/tests/system/gapic/v1/test_system_cluster_controller_v1.py +++ b/tests/system/gapic/v1/test_system_cluster_controller_v1.py @@ -18,7 +18,6 @@ import time from google.cloud import dataproc_v1 -from google.cloud.dataproc_v1.proto import clusters_pb2 class TestSystemClusterController(object): @@ -28,4 +27,6 @@ def test_list_clusters(self): client = dataproc_v1.ClusterControllerClient() project_id_2 = project_id region = "global" - response = client.list_clusters(project_id_2, region) + response = client.list_clusters( + request={"project_id": project_id_2, "region": region} + ) diff --git a/tests/system/gapic/v1beta2/test_system_cluster_controller_v1beta2.py b/tests/system/gapic/v1beta2/test_system_cluster_controller_v1beta2.py index 8db97e8c..319bbe55 100644 --- a/tests/system/gapic/v1beta2/test_system_cluster_controller_v1beta2.py +++ b/tests/system/gapic/v1beta2/test_system_cluster_controller_v1beta2.py @@ -18,7 +18,6 @@ import time from google.cloud import dataproc_v1beta2 -from google.cloud.dataproc_v1beta2.proto import clusters_pb2 class TestSystemClusterController(object): @@ -28,4 +27,6 @@ def test_list_clusters(self): client = dataproc_v1beta2.ClusterControllerClient() project_id_2 = project_id region = "global" - response = client.list_clusters(project_id_2, region) + response = client.list_clusters( + request={"project_id": project_id_2, "region": region} + ) diff --git a/tests/unit/gapic/dataproc_v1/__init__.py b/tests/unit/gapic/dataproc_v1/__init__.py new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/tests/unit/gapic/dataproc_v1/__init__.py @@ -0,0 +1 @@ + diff --git a/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py b/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py new file mode 100644 index 00000000..bf660db4 --- /dev/null +++ b/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py @@ -0,0 +1,2015 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.dataproc_v1.services.autoscaling_policy_service import ( + AutoscalingPolicyServiceAsyncClient, +) +from google.cloud.dataproc_v1.services.autoscaling_policy_service import ( + AutoscalingPolicyServiceClient, +) +from google.cloud.dataproc_v1.services.autoscaling_policy_service import pagers +from google.cloud.dataproc_v1.services.autoscaling_policy_service import transports +from google.cloud.dataproc_v1.types import autoscaling_policies +from google.oauth2 import service_account +from google.protobuf import duration_pb2 as duration # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert AutoscalingPolicyServiceClient._get_default_mtls_endpoint(None) is None + assert ( + AutoscalingPolicyServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + AutoscalingPolicyServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + AutoscalingPolicyServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + AutoscalingPolicyServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + AutoscalingPolicyServiceClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", + [AutoscalingPolicyServiceClient, AutoscalingPolicyServiceAsyncClient], +) +def test_autoscaling_policy_service_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client._transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client._transport._credentials == creds + + assert client._transport._host == "dataproc.googleapis.com:443" + + +def test_autoscaling_policy_service_client_get_transport_class(): + transport = AutoscalingPolicyServiceClient.get_transport_class() + assert transport == transports.AutoscalingPolicyServiceGrpcTransport + + transport = AutoscalingPolicyServiceClient.get_transport_class("grpc") + assert transport == transports.AutoscalingPolicyServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + AutoscalingPolicyServiceClient, + transports.AutoscalingPolicyServiceGrpcTransport, + "grpc", + ), + ( + AutoscalingPolicyServiceAsyncClient, + transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + AutoscalingPolicyServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(AutoscalingPolicyServiceClient), +) +@mock.patch.object( + AutoscalingPolicyServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(AutoscalingPolicyServiceAsyncClient), +) +def test_autoscaling_policy_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object( + AutoscalingPolicyServiceClient, "get_transport_class" + ) as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object( + AutoscalingPolicyServiceClient, "get_transport_class" + ) as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + api_mtls_endpoint="squid.clam.whelk", + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", and client_cert_source is provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=client_cert_source_callback, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", and default_client_cert_source is provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", but client_cert_source and default_client_cert_source are None. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + AutoscalingPolicyServiceClient, + transports.AutoscalingPolicyServiceGrpcTransport, + "grpc", + ), + ( + AutoscalingPolicyServiceAsyncClient, + transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_autoscaling_policy_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + AutoscalingPolicyServiceClient, + transports.AutoscalingPolicyServiceGrpcTransport, + "grpc", + ), + ( + AutoscalingPolicyServiceAsyncClient, + transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_autoscaling_policy_service_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + +def test_autoscaling_policy_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.dataproc_v1.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = AutoscalingPolicyServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + api_mtls_endpoint="squid.clam.whelk", + client_cert_source=None, + quota_project_id=None, + ) + + +def test_create_autoscaling_policy( + transport: str = "grpc", + request_type=autoscaling_policies.CreateAutoscalingPolicyRequest, +): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy( + id="id_value", + name="name_value", + basic_algorithm=autoscaling_policies.BasicAutoscalingAlgorithm( + yarn_config=autoscaling_policies.BasicYarnAutoscalingConfig( + graceful_decommission_timeout=duration.Duration(seconds=751) + ) + ), + ) + + response = client.create_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == autoscaling_policies.CreateAutoscalingPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, autoscaling_policies.AutoscalingPolicy) + + assert response.id == "id_value" + + assert response.name == "name_value" + + +def test_create_autoscaling_policy_from_dict(): + test_create_autoscaling_policy(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_autoscaling_policy_async(transport: str = "grpc_asyncio"): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = autoscaling_policies.CreateAutoscalingPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + autoscaling_policies.AutoscalingPolicy(id="id_value", name="name_value",) + ) + + response = await client.create_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, autoscaling_policies.AutoscalingPolicy) + + assert response.id == "id_value" + + assert response.name == "name_value" + + +def test_create_autoscaling_policy_field_headers(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.CreateAutoscalingPolicyRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_autoscaling_policy), "__call__" + ) as call: + call.return_value = autoscaling_policies.AutoscalingPolicy() + + client.create_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_autoscaling_policy_field_headers_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.CreateAutoscalingPolicyRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_autoscaling_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + autoscaling_policies.AutoscalingPolicy() + ) + + await client.create_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_autoscaling_policy_flattened(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_autoscaling_policy( + parent="parent_value", + policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id="id_value") + + +def test_create_autoscaling_policy_flattened_error(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_autoscaling_policy( + autoscaling_policies.CreateAutoscalingPolicyRequest(), + parent="parent_value", + policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + ) + + +@pytest.mark.asyncio +async def test_create_autoscaling_policy_flattened_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + autoscaling_policies.AutoscalingPolicy() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_autoscaling_policy( + parent="parent_value", + policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id="id_value") + + +@pytest.mark.asyncio +async def test_create_autoscaling_policy_flattened_error_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_autoscaling_policy( + autoscaling_policies.CreateAutoscalingPolicyRequest(), + parent="parent_value", + policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + ) + + +def test_update_autoscaling_policy( + transport: str = "grpc", + request_type=autoscaling_policies.UpdateAutoscalingPolicyRequest, +): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.update_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy( + id="id_value", + name="name_value", + basic_algorithm=autoscaling_policies.BasicAutoscalingAlgorithm( + yarn_config=autoscaling_policies.BasicYarnAutoscalingConfig( + graceful_decommission_timeout=duration.Duration(seconds=751) + ) + ), + ) + + response = client.update_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == autoscaling_policies.UpdateAutoscalingPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, autoscaling_policies.AutoscalingPolicy) + + assert response.id == "id_value" + + assert response.name == "name_value" + + +def test_update_autoscaling_policy_from_dict(): + test_update_autoscaling_policy(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_autoscaling_policy_async(transport: str = "grpc_asyncio"): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = autoscaling_policies.UpdateAutoscalingPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + autoscaling_policies.AutoscalingPolicy(id="id_value", name="name_value",) + ) + + response = await client.update_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, autoscaling_policies.AutoscalingPolicy) + + assert response.id == "id_value" + + assert response.name == "name_value" + + +def test_update_autoscaling_policy_field_headers(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.UpdateAutoscalingPolicyRequest() + request.policy.name = "policy.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.update_autoscaling_policy), "__call__" + ) as call: + call.return_value = autoscaling_policies.AutoscalingPolicy() + + client.update_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "policy.name=policy.name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_autoscaling_policy_field_headers_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.UpdateAutoscalingPolicyRequest() + request.policy.name = "policy.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_autoscaling_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + autoscaling_policies.AutoscalingPolicy() + ) + + await client.update_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "policy.name=policy.name/value",) in kw["metadata"] + + +def test_update_autoscaling_policy_flattened(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.update_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_autoscaling_policy( + policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id="id_value") + + +def test_update_autoscaling_policy_flattened_error(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_autoscaling_policy( + autoscaling_policies.UpdateAutoscalingPolicyRequest(), + policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + ) + + +@pytest.mark.asyncio +async def test_update_autoscaling_policy_flattened_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + autoscaling_policies.AutoscalingPolicy() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_autoscaling_policy( + policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id="id_value") + + +@pytest.mark.asyncio +async def test_update_autoscaling_policy_flattened_error_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
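+    # (Supplying both would make it ambiguous which values win, so the client
+    # raises ValueError before any RPC is attempted.)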
+ with pytest.raises(ValueError): + await client.update_autoscaling_policy( + autoscaling_policies.UpdateAutoscalingPolicyRequest(), + policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + ) + + +def test_get_autoscaling_policy( + transport: str = "grpc", + request_type=autoscaling_policies.GetAutoscalingPolicyRequest, +): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy( + id="id_value", + name="name_value", + basic_algorithm=autoscaling_policies.BasicAutoscalingAlgorithm( + yarn_config=autoscaling_policies.BasicYarnAutoscalingConfig( + graceful_decommission_timeout=duration.Duration(seconds=751) + ) + ), + ) + + response = client.get_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == autoscaling_policies.GetAutoscalingPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, autoscaling_policies.AutoscalingPolicy) + + assert response.id == "id_value" + + assert response.name == "name_value" + + +def test_get_autoscaling_policy_from_dict(): + test_get_autoscaling_policy(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_autoscaling_policy_async(transport: str = "grpc_asyncio"): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = autoscaling_policies.GetAutoscalingPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + autoscaling_policies.AutoscalingPolicy(id="id_value", name="name_value",) + ) + + response = await client.get_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, autoscaling_policies.AutoscalingPolicy) + + assert response.id == "id_value" + + assert response.name == "name_value" + + +def test_get_autoscaling_policy_field_headers(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.GetAutoscalingPolicyRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.get_autoscaling_policy), "__call__" + ) as call: + call.return_value = autoscaling_policies.AutoscalingPolicy() + + client.get_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_autoscaling_policy_field_headers_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.GetAutoscalingPolicyRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_autoscaling_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + autoscaling_policies.AutoscalingPolicy() + ) + + await client.get_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_autoscaling_policy_flattened(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_autoscaling_policy(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_autoscaling_policy_flattened_error(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_autoscaling_policy( + autoscaling_policies.GetAutoscalingPolicyRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_autoscaling_policy_flattened_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + autoscaling_policies.AutoscalingPolicy() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.get_autoscaling_policy(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_autoscaling_policy_flattened_error_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_autoscaling_policy( + autoscaling_policies.GetAutoscalingPolicyRequest(), name="name_value", + ) + + +def test_list_autoscaling_policies( + transport: str = "grpc", + request_type=autoscaling_policies.ListAutoscalingPoliciesRequest, +): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_autoscaling_policies), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.ListAutoscalingPoliciesResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_autoscaling_policies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == autoscaling_policies.ListAutoscalingPoliciesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListAutoscalingPoliciesPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_autoscaling_policies_from_dict(): + test_list_autoscaling_policies(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_autoscaling_policies_async(transport: str = "grpc_asyncio"): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = autoscaling_policies.ListAutoscalingPoliciesRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_autoscaling_policies), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + autoscaling_policies.ListAutoscalingPoliciesResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_autoscaling_policies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
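+    # (The async client wraps the raw ListAutoscalingPoliciesResponse in an
+    # AsyncPager so callers can iterate results with `async for`.)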
+ assert isinstance(response, pagers.ListAutoscalingPoliciesAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_autoscaling_policies_field_headers(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.ListAutoscalingPoliciesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_autoscaling_policies), "__call__" + ) as call: + call.return_value = autoscaling_policies.ListAutoscalingPoliciesResponse() + + client.list_autoscaling_policies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_autoscaling_policies_field_headers_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.ListAutoscalingPoliciesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_autoscaling_policies), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + autoscaling_policies.ListAutoscalingPoliciesResponse() + ) + + await client.list_autoscaling_policies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_autoscaling_policies_flattened(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_autoscaling_policies), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.ListAutoscalingPoliciesResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_autoscaling_policies(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_autoscaling_policies_flattened_error(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_autoscaling_policies( + autoscaling_policies.ListAutoscalingPoliciesRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_autoscaling_policies_flattened_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_autoscaling_policies), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.ListAutoscalingPoliciesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + autoscaling_policies.ListAutoscalingPoliciesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_autoscaling_policies(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_autoscaling_policies_flattened_error_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_autoscaling_policies( + autoscaling_policies.ListAutoscalingPoliciesRequest(), + parent="parent_value", + ) + + +def test_list_autoscaling_policies_pager(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_autoscaling_policies), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + ], + next_page_token="abc", + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[], next_page_token="def", + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[autoscaling_policies.AutoscalingPolicy(),], + next_page_token="ghi", + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_autoscaling_policies(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all( + isinstance(i, autoscaling_policies.AutoscalingPolicy) for i in results + ) + + +def test_list_autoscaling_policies_pages(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_autoscaling_policies), "__call__" + ) as call: + # Set the response to a series of pages. 
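+        # (Each element of side_effect is returned for one successive page
+        # request; the trailing RuntimeError would surface only if the pager
+        # requested a page beyond the final empty-token response.)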
+ call.side_effect = ( + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + ], + next_page_token="abc", + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[], next_page_token="def", + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[autoscaling_policies.AutoscalingPolicy(),], + next_page_token="ghi", + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + ], + ), + RuntimeError, + ) + pages = list(client.list_autoscaling_policies(request={}).pages) + for page, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_autoscaling_policies_async_pager(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_autoscaling_policies), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + ], + next_page_token="abc", + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[], next_page_token="def", + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[autoscaling_policies.AutoscalingPolicy(),], + next_page_token="ghi", + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_autoscaling_policies(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all( + isinstance(i, autoscaling_policies.AutoscalingPolicy) for i in responses + ) + + +@pytest.mark.asyncio +async def test_list_autoscaling_policies_async_pages(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_autoscaling_policies), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + ], + next_page_token="abc", + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[], next_page_token="def", + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[autoscaling_policies.AutoscalingPolicy(),], + next_page_token="ghi", + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + ], + ), + RuntimeError, + ) + pages = [] + async for page in (await client.list_autoscaling_policies(request={})).pages: + pages.append(page) + for page, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page.raw_page.next_page_token == token + + +def test_delete_autoscaling_policy( + transport: str = "grpc", + request_type=autoscaling_policies.DeleteAutoscalingPolicyRequest, +): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == autoscaling_policies.DeleteAutoscalingPolicyRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_autoscaling_policy_from_dict(): + test_delete_autoscaling_policy(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_autoscaling_policy_async(transport: str = "grpc_asyncio"): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = autoscaling_policies.DeleteAutoscalingPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_autoscaling_policy_field_headers(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
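+    # (The client is expected to mirror request.name into the
+    # x-goog-request-params metadata entry so the backend can route the call.)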
+ request = autoscaling_policies.DeleteAutoscalingPolicyRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_autoscaling_policy), "__call__" + ) as call: + call.return_value = None + + client.delete_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_autoscaling_policy_field_headers_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.DeleteAutoscalingPolicyRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_autoscaling_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.delete_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_autoscaling_policy_flattened(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_autoscaling_policy(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_autoscaling_policy_flattened_error(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_autoscaling_policy( + autoscaling_policies.DeleteAutoscalingPolicyRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_autoscaling_policy_flattened_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.delete_autoscaling_policy(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_autoscaling_policy_flattened_error_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_autoscaling_policy( + autoscaling_policies.DeleteAutoscalingPolicyRequest(), name="name_value", + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.AutoscalingPolicyServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.AutoscalingPolicyServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AutoscalingPolicyServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.AutoscalingPolicyServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AutoscalingPolicyServiceClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.AutoscalingPolicyServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = AutoscalingPolicyServiceClient(transport=transport) + assert client._transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.AutoscalingPolicyServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.AutoscalingPolicyServiceGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + assert isinstance( + client._transport, transports.AutoscalingPolicyServiceGrpcTransport, + ) + + +def test_autoscaling_policy_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.AutoscalingPolicyServiceTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_autoscaling_policy_service_base_transport(): + # Instantiate the base transport. 
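+    # (__init__ is patched so the abstract base class can be constructed
+    # directly; each RPC method on it should raise NotImplementedError until a
+    # concrete transport such as the gRPC one overrides it.)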
+ with mock.patch( + "google.cloud.dataproc_v1.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.AutoscalingPolicyServiceTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "create_autoscaling_policy", + "update_autoscaling_policy", + "get_autoscaling_policy", + "list_autoscaling_policies", + "delete_autoscaling_policy", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + +def test_autoscaling_policy_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.dataproc_v1.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.AutoscalingPolicyServiceTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_autoscaling_policy_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + AutoscalingPolicyServiceClient() + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +def test_autoscaling_policy_service_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.AutoscalingPolicyServiceGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_autoscaling_policy_service_host_no_port(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="dataproc.googleapis.com" + ), + ) + assert client._transport._host == "dataproc.googleapis.com:443" + + +def test_autoscaling_policy_service_host_with_port(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="dataproc.googleapis.com:8000" + ), + ) + assert client._transport._host == "dataproc.googleapis.com:8000" + + +def test_autoscaling_policy_service_grpc_transport_channel(): + channel = grpc.insecure_channel("http://localhost/") + + # Check that if channel is provided, mtls endpoint and client_cert_source + # won't be used. 
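+    # (A ready-made channel should short-circuit channel creation entirely, so
+    # the cert-source callback must never be invoked.)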
+ callback = mock.MagicMock() + transport = transports.AutoscalingPolicyServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=callback, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert not callback.called + + +def test_autoscaling_policy_service_grpc_asyncio_transport_channel(): + channel = aio.insecure_channel("http://localhost/") + + # Check that if channel is provided, mtls endpoint and client_cert_source + # won't be used. + callback = mock.MagicMock() + transport = transports.AutoscalingPolicyServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=callback, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert not callback.called + + +@mock.patch("grpc.ssl_channel_credentials", autospec=True) +@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True) +def test_autoscaling_policy_service_grpc_transport_channel_mtls_with_client_cert_source( + grpc_create_channel, grpc_ssl_channel_cred +): + # Check that if channel is None, but api_mtls_endpoint and client_cert_source + # are provided, then a mTLS channel will be created. + mock_cred = mock.Mock() + + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + transport = transports.AutoscalingPolicyServiceGrpcTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@mock.patch("grpc.ssl_channel_credentials", autospec=True) +@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True) +def test_autoscaling_policy_service_grpc_asyncio_transport_channel_mtls_with_client_cert_source( + grpc_create_channel, grpc_ssl_channel_cred +): + # Check that if channel is None, but api_mtls_endpoint and client_cert_source + # are provided, then a mTLS channel will be created. 
+ mock_cred = mock.Mock() + + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + transport = transports.AutoscalingPolicyServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"] +) +@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True) +def test_autoscaling_policy_service_grpc_transport_channel_mtls_with_adc( + grpc_create_channel, api_mtls_endpoint +): + # Check that if channel and client_cert_source are None, but api_mtls_endpoint + # is provided, then a mTLS channel will be created with SSL ADC. + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + # Mock google.auth.transport.grpc.SslCredentials class. + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + mock_cred = mock.Mock() + transport = transports.AutoscalingPolicyServiceGrpcTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint=api_mtls_endpoint, + client_cert_source=None, + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"] +) +@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True) +def test_autoscaling_policy_service_grpc_asyncio_transport_channel_mtls_with_adc( + grpc_create_channel, api_mtls_endpoint +): + # Check that if channel and client_cert_source are None, but api_mtls_endpoint + # is provided, then a mTLS channel will be created with SSL ADC. + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + # Mock google.auth.transport.grpc.SslCredentials class. 
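+    # (Patching SslCredentials lets the test confirm that ADC-provided SSL
+    # credentials are forwarded to create_channel without loading real
+    # certificates from the environment.)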
+ mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + mock_cred = mock.Mock() + transport = transports.AutoscalingPolicyServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint=api_mtls_endpoint, + client_cert_source=None, + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_autoscaling_policy_path(): + project = "squid" + location = "clam" + autoscaling_policy = "whelk" + + expected = "projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}".format( + project=project, location=location, autoscaling_policy=autoscaling_policy, + ) + actual = AutoscalingPolicyServiceClient.autoscaling_policy_path( + project, location, autoscaling_policy + ) + assert expected == actual + + +def test_parse_autoscaling_policy_path(): + expected = { + "project": "octopus", + "location": "oyster", + "autoscaling_policy": "nudibranch", + } + path = AutoscalingPolicyServiceClient.autoscaling_policy_path(**expected) + + # Check that the path construction is reversible. + actual = AutoscalingPolicyServiceClient.parse_autoscaling_policy_path(path) + assert expected == actual diff --git a/tests/unit/gapic/dataproc_v1/test_cluster_controller.py b/tests/unit/gapic/dataproc_v1/test_cluster_controller.py new file mode 100644 index 00000000..072ac8ff --- /dev/null +++ b/tests/unit/gapic/dataproc_v1/test_cluster_controller.py @@ -0,0 +1,1841 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async +from google.api_core import operations_v1 +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.dataproc_v1.services.cluster_controller import ( + ClusterControllerAsyncClient, +) +from google.cloud.dataproc_v1.services.cluster_controller import ClusterControllerClient +from google.cloud.dataproc_v1.services.cluster_controller import pagers +from google.cloud.dataproc_v1.services.cluster_controller import transports +from google.cloud.dataproc_v1.types import clusters +from google.cloud.dataproc_v1.types import clusters as gcd_clusters +from google.cloud.dataproc_v1.types import operations +from google.cloud.dataproc_v1.types import shared +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import duration_pb2 as duration # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ClusterControllerClient._get_default_mtls_endpoint(None) is None + assert ( + ClusterControllerClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + ClusterControllerClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + ClusterControllerClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + ClusterControllerClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + ClusterControllerClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", [ClusterControllerClient, ClusterControllerAsyncClient] +) +def test_cluster_controller_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client._transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client._transport._credentials == creds + + assert client._transport._host == "dataproc.googleapis.com:443" + + +def test_cluster_controller_client_get_transport_class(): + transport = ClusterControllerClient.get_transport_class() + assert transport == transports.ClusterControllerGrpcTransport + + transport = ClusterControllerClient.get_transport_class("grpc") + assert transport == transports.ClusterControllerGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (ClusterControllerClient, transports.ClusterControllerGrpcTransport, "grpc"), + ( + ClusterControllerAsyncClient, + transports.ClusterControllerGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + ClusterControllerClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ClusterControllerClient), +) +@mock.patch.object( + ClusterControllerAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ClusterControllerAsyncClient), +) +def test_cluster_controller_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(ClusterControllerClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(ClusterControllerClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + api_mtls_endpoint="squid.clam.whelk", + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", and client_cert_source is provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=client_cert_source_callback, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", and default_client_cert_source is provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", but client_cert_source and default_client_cert_source are None. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (ClusterControllerClient, transports.ClusterControllerGrpcTransport, "grpc"), + ( + ClusterControllerAsyncClient, + transports.ClusterControllerGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_cluster_controller_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (ClusterControllerClient, transports.ClusterControllerGrpcTransport, "grpc"), + ( + ClusterControllerAsyncClient, + transports.ClusterControllerGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_cluster_controller_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + +def test_cluster_controller_client_client_options_from_dict(): + with mock.patch( + "google.cloud.dataproc_v1.services.cluster_controller.transports.ClusterControllerGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = ClusterControllerClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + api_mtls_endpoint="squid.clam.whelk", + client_cert_source=None, + quota_project_id=None, + ) + + +def test_create_cluster( + transport: str = "grpc", request_type=clusters.CreateClusterRequest +): + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.create_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == clusters.CreateClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_cluster_from_dict(): + test_create_cluster(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_cluster_async(transport: str = "grpc_asyncio"): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = clusters.CreateClusterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_cluster_flattened(): + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.create_cluster), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_cluster( + project_id="project_id_value", + region="region_value", + cluster=clusters.Cluster(project_id="project_id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].cluster == clusters.Cluster(project_id="project_id_value") + + +def test_create_cluster_flattened_error(): + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_cluster( + clusters.CreateClusterRequest(), + project_id="project_id_value", + region="region_value", + cluster=clusters.Cluster(project_id="project_id_value"), + ) + + +@pytest.mark.asyncio +async def test_create_cluster_flattened_async(): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_cluster( + project_id="project_id_value", + region="region_value", + cluster=clusters.Cluster(project_id="project_id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].cluster == clusters.Cluster(project_id="project_id_value") + + +@pytest.mark.asyncio +async def test_create_cluster_flattened_error_async(): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_cluster( + clusters.CreateClusterRequest(), + project_id="project_id_value", + region="region_value", + cluster=clusters.Cluster(project_id="project_id_value"), + ) + + +def test_update_cluster( + transport: str = "grpc", request_type=clusters.UpdateClusterRequest +): + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.update_cluster), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == clusters.UpdateClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_cluster_from_dict(): + test_update_cluster(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_cluster_async(transport: str = "grpc_asyncio"): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = clusters.UpdateClusterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_cluster_flattened(): + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.update_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_cluster( + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + cluster=clusters.Cluster(project_id="project_id_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].cluster_name == "cluster_name_value" + + assert args[0].cluster == clusters.Cluster(project_id="project_id_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +def test_update_cluster_flattened_error(): + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_cluster( + clusters.UpdateClusterRequest(), + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + cluster=clusters.Cluster(project_id="project_id_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_cluster_flattened_async(): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_cluster( + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + cluster=clusters.Cluster(project_id="project_id_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].cluster_name == "cluster_name_value" + + assert args[0].cluster == clusters.Cluster(project_id="project_id_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +@pytest.mark.asyncio +async def test_update_cluster_flattened_error_async(): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_cluster( + clusters.UpdateClusterRequest(), + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + cluster=clusters.Cluster(project_id="project_id_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_cluster( + transport: str = "grpc", request_type=clusters.DeleteClusterRequest +): + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.delete_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == clusters.DeleteClusterRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_delete_cluster_from_dict(): + test_delete_cluster(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_cluster_async(transport: str = "grpc_asyncio"): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = clusters.DeleteClusterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_cluster_flattened(): + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.delete_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_cluster( + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].cluster_name == "cluster_name_value" + + +def test_delete_cluster_flattened_error(): + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_cluster( + clusters.DeleteClusterRequest(), + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_cluster_flattened_async(): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_cluster( + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].cluster_name == "cluster_name_value" + + +@pytest.mark.asyncio +async def test_delete_cluster_flattened_error_async(): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_cluster( + clusters.DeleteClusterRequest(), + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + ) + + +def test_get_cluster(transport: str = "grpc", request_type=clusters.GetClusterRequest): + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = clusters.Cluster( + project_id="project_id_value", + cluster_name="cluster_name_value", + cluster_uuid="cluster_uuid_value", + ) + + response = client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == clusters.GetClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, clusters.Cluster) + + assert response.project_id == "project_id_value" + + assert response.cluster_name == "cluster_name_value" + + assert response.cluster_uuid == "cluster_uuid_value" + + +def test_get_cluster_from_dict(): + test_get_cluster(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_cluster_async(transport: str = "grpc_asyncio"): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = clusters.GetClusterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + clusters.Cluster( + project_id="project_id_value", + cluster_name="cluster_name_value", + cluster_uuid="cluster_uuid_value", + ) + ) + + response = await client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, clusters.Cluster) + + assert response.project_id == "project_id_value" + + assert response.cluster_name == "cluster_name_value" + + assert response.cluster_uuid == "cluster_uuid_value" + + +def test_get_cluster_flattened(): + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client._transport.get_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = clusters.Cluster() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_cluster( + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].cluster_name == "cluster_name_value" + + +def test_get_cluster_flattened_error(): + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_cluster( + clusters.GetClusterRequest(), + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + ) + + +@pytest.mark.asyncio +async def test_get_cluster_flattened_async(): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = clusters.Cluster() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(clusters.Cluster()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_cluster( + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].cluster_name == "cluster_name_value" + + +@pytest.mark.asyncio +async def test_get_cluster_flattened_error_async(): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_cluster( + clusters.GetClusterRequest(), + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + ) + + +def test_list_clusters( + transport: str = "grpc", request_type=clusters.ListClustersRequest +): + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_clusters), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = clusters.ListClustersResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == clusters.ListClustersRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListClustersPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_clusters_from_dict(): + test_list_clusters(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_clusters_async(transport: str = "grpc_asyncio"): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = clusters.ListClustersRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_clusters), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + clusters.ListClustersResponse(next_page_token="next_page_token_value",) + ) + + response = await client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListClustersAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_clusters_flattened(): + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_clusters), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = clusters.ListClustersResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_clusters( + project_id="project_id_value", region="region_value", filter="filter_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].filter == "filter_value" + + +def test_list_clusters_flattened_error(): + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_clusters( + clusters.ListClustersRequest(), + project_id="project_id_value", + region="region_value", + filter="filter_value", + ) + + +@pytest.mark.asyncio +async def test_list_clusters_flattened_async(): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_clusters), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = clusters.ListClustersResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + clusters.ListClustersResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.list_clusters( + project_id="project_id_value", region="region_value", filter="filter_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].filter == "filter_value" + + +@pytest.mark.asyncio +async def test_list_clusters_flattened_error_async(): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_clusters( + clusters.ListClustersRequest(), + project_id="project_id_value", + region="region_value", + filter="filter_value", + ) + + +def test_list_clusters_pager(): + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_clusters), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + clusters.ListClustersResponse( + clusters=[clusters.Cluster(), clusters.Cluster(), clusters.Cluster(),], + next_page_token="abc", + ), + clusters.ListClustersResponse(clusters=[], next_page_token="def",), + clusters.ListClustersResponse( + clusters=[clusters.Cluster(),], next_page_token="ghi", + ), + clusters.ListClustersResponse( + clusters=[clusters.Cluster(), clusters.Cluster(),], + ), + RuntimeError, + ) + + metadata = () + pager = client.list_clusters(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, clusters.Cluster) for i in results) + + +def test_list_clusters_pages(): + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_clusters), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + clusters.ListClustersResponse( + clusters=[clusters.Cluster(), clusters.Cluster(), clusters.Cluster(),], + next_page_token="abc", + ), + clusters.ListClustersResponse(clusters=[], next_page_token="def",), + clusters.ListClustersResponse( + clusters=[clusters.Cluster(),], next_page_token="ghi", + ), + clusters.ListClustersResponse( + clusters=[clusters.Cluster(), clusters.Cluster(),], + ), + RuntimeError, + ) + pages = list(client.list_clusters(request={}).pages) + for page, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_clusters_async_pager(): + client = ClusterControllerAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_clusters), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + clusters.ListClustersResponse( + clusters=[clusters.Cluster(), clusters.Cluster(), clusters.Cluster(),], + next_page_token="abc", + ), + clusters.ListClustersResponse(clusters=[], next_page_token="def",), + clusters.ListClustersResponse( + clusters=[clusters.Cluster(),], next_page_token="ghi", + ), + clusters.ListClustersResponse( + clusters=[clusters.Cluster(), clusters.Cluster(),], + ), + RuntimeError, + ) + async_pager = await client.list_clusters(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, clusters.Cluster) for i in responses) + + +@pytest.mark.asyncio +async def test_list_clusters_async_pages(): + client = ClusterControllerAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_clusters), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + clusters.ListClustersResponse( + clusters=[clusters.Cluster(), clusters.Cluster(), clusters.Cluster(),], + next_page_token="abc", + ), + clusters.ListClustersResponse(clusters=[], next_page_token="def",), + clusters.ListClustersResponse( + clusters=[clusters.Cluster(),], next_page_token="ghi", + ), + clusters.ListClustersResponse( + clusters=[clusters.Cluster(), clusters.Cluster(),], + ), + RuntimeError, + ) + pages = [] + async for page in (await client.list_clusters(request={})).pages: + pages.append(page) + for page, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page.raw_page.next_page_token == token + + +def test_diagnose_cluster( + transport: str = "grpc", request_type=clusters.DiagnoseClusterRequest +): + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.diagnose_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.diagnose_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == clusters.DiagnoseClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_diagnose_cluster_from_dict(): + test_diagnose_cluster(request_type=dict) + + +@pytest.mark.asyncio +async def test_diagnose_cluster_async(transport: str = "grpc_asyncio"): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = clusters.DiagnoseClusterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.diagnose_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.diagnose_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_diagnose_cluster_flattened(): + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.diagnose_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.diagnose_cluster( + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].cluster_name == "cluster_name_value" + + +def test_diagnose_cluster_flattened_error(): + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.diagnose_cluster( + clusters.DiagnoseClusterRequest(), + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + ) + + +@pytest.mark.asyncio +async def test_diagnose_cluster_flattened_async(): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.diagnose_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.diagnose_cluster( + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].cluster_name == "cluster_name_value" + + +@pytest.mark.asyncio +async def test_diagnose_cluster_flattened_error_async(): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.diagnose_cluster( + clusters.DiagnoseClusterRequest(), + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.ClusterControllerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.ClusterControllerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ClusterControllerClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.ClusterControllerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ClusterControllerClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.ClusterControllerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = ClusterControllerClient(transport=transport) + assert client._transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.ClusterControllerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.ClusterControllerGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client._transport, transports.ClusterControllerGrpcTransport,) + + +def test_cluster_controller_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.ClusterControllerTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_cluster_controller_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.dataproc_v1.services.cluster_controller.transports.ClusterControllerTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.ClusterControllerTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + "create_cluster", + "update_cluster", + "delete_cluster", + "get_cluster", + "list_clusters", + "diagnose_cluster", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +def test_cluster_controller_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.dataproc_v1.services.cluster_controller.transports.ClusterControllerTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.ClusterControllerTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_cluster_controller_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + ClusterControllerClient() + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +def test_cluster_controller_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.ClusterControllerGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_cluster_controller_host_no_port(): + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="dataproc.googleapis.com" + ), + ) + assert client._transport._host == "dataproc.googleapis.com:443" + + +def test_cluster_controller_host_with_port(): + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="dataproc.googleapis.com:8000" + ), + ) + assert client._transport._host == "dataproc.googleapis.com:8000" + + +def test_cluster_controller_grpc_transport_channel(): + channel = grpc.insecure_channel("http://localhost/") + + # Check that if channel is provided, mtls endpoint and client_cert_source + # won't be used. + callback = mock.MagicMock() + transport = transports.ClusterControllerGrpcTransport( + host="squid.clam.whelk", + channel=channel, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=callback, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert not callback.called + + +def test_cluster_controller_grpc_asyncio_transport_channel(): + channel = aio.insecure_channel("http://localhost/") + + # Check that if channel is provided, mtls endpoint and client_cert_source + # won't be used. 
+    callback = mock.MagicMock()
+    transport = transports.ClusterControllerGrpcAsyncIOTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+        api_mtls_endpoint="mtls.squid.clam.whelk",
+        client_cert_source=callback,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert not callback.called
+
+
+@mock.patch("grpc.ssl_channel_credentials", autospec=True)
+@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True)
+def test_cluster_controller_grpc_transport_channel_mtls_with_client_cert_source(
+    grpc_create_channel, grpc_ssl_channel_cred
+):
+    # Check that if channel is None, but api_mtls_endpoint and client_cert_source
+    # are provided, then an mTLS channel will be created.
+    mock_cred = mock.Mock()
+
+    mock_ssl_cred = mock.Mock()
+    grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+    mock_grpc_channel = mock.Mock()
+    grpc_create_channel.return_value = mock_grpc_channel
+
+    transport = transports.ClusterControllerGrpcTransport(
+        host="squid.clam.whelk",
+        credentials=mock_cred,
+        api_mtls_endpoint="mtls.squid.clam.whelk",
+        client_cert_source=client_cert_source_callback,
+    )
+    grpc_ssl_channel_cred.assert_called_once_with(
+        certificate_chain=b"cert bytes", private_key=b"key bytes"
+    )
+    grpc_create_channel.assert_called_once_with(
+        "mtls.squid.clam.whelk:443",
+        credentials=mock_cred,
+        credentials_file=None,
+        scopes=("https://www.googleapis.com/auth/cloud-platform",),
+        ssl_credentials=mock_ssl_cred,
+        quota_project_id=None,
+    )
+    assert transport.grpc_channel == mock_grpc_channel
+
+
+@mock.patch("grpc.ssl_channel_credentials", autospec=True)
+@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True)
+def test_cluster_controller_grpc_asyncio_transport_channel_mtls_with_client_cert_source(
+    grpc_create_channel, grpc_ssl_channel_cred
+):
+    # Check that if channel is None, but api_mtls_endpoint and client_cert_source
+    # are provided, then an mTLS channel will be created.
+    mock_cred = mock.Mock()
+
+    mock_ssl_cred = mock.Mock()
+    grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+    mock_grpc_channel = mock.Mock()
+    grpc_create_channel.return_value = mock_grpc_channel
+
+    transport = transports.ClusterControllerGrpcAsyncIOTransport(
+        host="squid.clam.whelk",
+        credentials=mock_cred,
+        api_mtls_endpoint="mtls.squid.clam.whelk",
+        client_cert_source=client_cert_source_callback,
+    )
+    grpc_ssl_channel_cred.assert_called_once_with(
+        certificate_chain=b"cert bytes", private_key=b"key bytes"
+    )
+    grpc_create_channel.assert_called_once_with(
+        "mtls.squid.clam.whelk:443",
+        credentials=mock_cred,
+        credentials_file=None,
+        scopes=("https://www.googleapis.com/auth/cloud-platform",),
+        ssl_credentials=mock_ssl_cred,
+        quota_project_id=None,
+    )
+    assert transport.grpc_channel == mock_grpc_channel
+
+
+@pytest.mark.parametrize(
+    "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"]
+)
+@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True)
+def test_cluster_controller_grpc_transport_channel_mtls_with_adc(
+    grpc_create_channel, api_mtls_endpoint
+):
+    # Check that if channel and client_cert_source are None, but api_mtls_endpoint
+    # is provided, then an mTLS channel will be created with SSL ADC.
+    mock_grpc_channel = mock.Mock()
+    grpc_create_channel.return_value = mock_grpc_channel
+
+    # Mock google.auth.transport.grpc.SslCredentials class.
+ mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + mock_cred = mock.Mock() + transport = transports.ClusterControllerGrpcTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint=api_mtls_endpoint, + client_cert_source=None, + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"] +) +@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True) +def test_cluster_controller_grpc_asyncio_transport_channel_mtls_with_adc( + grpc_create_channel, api_mtls_endpoint +): + # Check that if channel and client_cert_source are None, but api_mtls_endpoint + # is provided, then a mTLS channel will be created with SSL ADC. + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + # Mock google.auth.transport.grpc.SslCredentials class. + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + mock_cred = mock.Mock() + transport = transports.ClusterControllerGrpcAsyncIOTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint=api_mtls_endpoint, + client_cert_source=None, + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_cluster_controller_grpc_lro_client(): + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + transport = client._transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_cluster_controller_grpc_lro_async_client(): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + transport = client._client._transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client diff --git a/tests/unit/gapic/dataproc_v1/test_job_controller.py b/tests/unit/gapic/dataproc_v1/test_job_controller.py new file mode 100644 index 00000000..83403e9f --- /dev/null +++ b/tests/unit/gapic/dataproc_v1/test_job_controller.py @@ -0,0 +1,1901 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async +from google.api_core import operations_v1 +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.dataproc_v1.services.job_controller import JobControllerAsyncClient +from google.cloud.dataproc_v1.services.job_controller import JobControllerClient +from google.cloud.dataproc_v1.services.job_controller import pagers +from google.cloud.dataproc_v1.services.job_controller import transports +from google.cloud.dataproc_v1.types import jobs +from google.cloud.dataproc_v1.types import jobs as gcd_jobs +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert JobControllerClient._get_default_mtls_endpoint(None) is None + assert ( + JobControllerClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + JobControllerClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + JobControllerClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + JobControllerClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + JobControllerClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", [JobControllerClient, JobControllerAsyncClient] +) +def test_job_controller_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client._transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client._transport._credentials == creds + + assert client._transport._host == "dataproc.googleapis.com:443" + + +def test_job_controller_client_get_transport_class(): + transport = JobControllerClient.get_transport_class() + assert transport == transports.JobControllerGrpcTransport + + transport = JobControllerClient.get_transport_class("grpc") + assert transport == transports.JobControllerGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (JobControllerClient, transports.JobControllerGrpcTransport, "grpc"), + ( + JobControllerAsyncClient, + transports.JobControllerGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + JobControllerClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(JobControllerClient), +) +@mock.patch.object( + JobControllerAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(JobControllerAsyncClient), +) +def test_job_controller_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(JobControllerClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(JobControllerClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + api_mtls_endpoint="squid.clam.whelk", + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", and client_cert_source is provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=client_cert_source_callback, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", and default_client_cert_source is provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", but client_cert_source and default_client_cert_source are None. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (JobControllerClient, transports.JobControllerGrpcTransport, "grpc"), + ( + JobControllerAsyncClient, + transports.JobControllerGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_job_controller_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (JobControllerClient, transports.JobControllerGrpcTransport, "grpc"), + ( + JobControllerAsyncClient, + transports.JobControllerGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_job_controller_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + +def test_job_controller_client_client_options_from_dict(): + with mock.patch( + "google.cloud.dataproc_v1.services.job_controller.transports.JobControllerGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = JobControllerClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + api_mtls_endpoint="squid.clam.whelk", + client_cert_source=None, + quota_project_id=None, + ) + + +def test_submit_job(transport: str = "grpc", request_type=jobs.SubmitJobRequest): + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.submit_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job( + driver_output_resource_uri="driver_output_resource_uri_value", + driver_control_files_uri="driver_control_files_uri_value", + job_uuid="job_uuid_value", + done=True, + hadoop_job=jobs.HadoopJob(main_jar_file_uri="main_jar_file_uri_value"), + ) + + response = client.submit_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == jobs.SubmitJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, jobs.Job) + + assert response.driver_output_resource_uri == "driver_output_resource_uri_value" + + assert response.driver_control_files_uri == "driver_control_files_uri_value" + + assert response.job_uuid == "job_uuid_value" + + assert response.done is True + + +def test_submit_job_from_dict(): + test_submit_job(request_type=dict) + + +@pytest.mark.asyncio +async def test_submit_job_async(transport: str = "grpc_asyncio"): + client = JobControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = jobs.SubmitJobRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.submit_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + jobs.Job( + driver_output_resource_uri="driver_output_resource_uri_value", + driver_control_files_uri="driver_control_files_uri_value", + job_uuid="job_uuid_value", + done=True, + ) + ) + + response = await client.submit_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, jobs.Job) + + assert response.driver_output_resource_uri == "driver_output_resource_uri_value" + + assert response.driver_control_files_uri == "driver_control_files_uri_value" + + assert response.job_uuid == "job_uuid_value" + + assert response.done is True + + +def test_submit_job_flattened(): + client = JobControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.submit_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.submit_job( + project_id="project_id_value", + region="region_value", + job=jobs.Job(reference=jobs.JobReference(project_id="project_id_value")), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].job == jobs.Job( + reference=jobs.JobReference(project_id="project_id_value") + ) + + +def test_submit_job_flattened_error(): + client = JobControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.submit_job( + jobs.SubmitJobRequest(), + project_id="project_id_value", + region="region_value", + job=jobs.Job(reference=jobs.JobReference(project_id="project_id_value")), + ) + + +@pytest.mark.asyncio +async def test_submit_job_flattened_async(): + client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.submit_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.submit_job( + project_id="project_id_value", + region="region_value", + job=jobs.Job(reference=jobs.JobReference(project_id="project_id_value")), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].job == jobs.Job( + reference=jobs.JobReference(project_id="project_id_value") + ) + + +@pytest.mark.asyncio +async def test_submit_job_flattened_error_async(): + client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.submit_job( + jobs.SubmitJobRequest(), + project_id="project_id_value", + region="region_value", + job=jobs.Job(reference=jobs.JobReference(project_id="project_id_value")), + ) + + +def test_submit_job_as_operation( + transport: str = "grpc", request_type=jobs.SubmitJobRequest +): + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.submit_job_as_operation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.submit_job_as_operation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == jobs.SubmitJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_submit_job_as_operation_from_dict(): + test_submit_job_as_operation(request_type=dict) + + +@pytest.mark.asyncio +async def test_submit_job_as_operation_async(transport: str = "grpc_asyncio"): + client = JobControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = jobs.SubmitJobRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.submit_job_as_operation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.submit_job_as_operation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_submit_job_as_operation_flattened(): + client = JobControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.submit_job_as_operation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.submit_job_as_operation( + project_id="project_id_value", + region="region_value", + job=jobs.Job(reference=jobs.JobReference(project_id="project_id_value")), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].job == jobs.Job( + reference=jobs.JobReference(project_id="project_id_value") + ) + + +def test_submit_job_as_operation_flattened_error(): + client = JobControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.submit_job_as_operation( + jobs.SubmitJobRequest(), + project_id="project_id_value", + region="region_value", + job=jobs.Job(reference=jobs.JobReference(project_id="project_id_value")), + ) + + +@pytest.mark.asyncio +async def test_submit_job_as_operation_flattened_async(): + client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.submit_job_as_operation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.submit_job_as_operation( + project_id="project_id_value", + region="region_value", + job=jobs.Job(reference=jobs.JobReference(project_id="project_id_value")), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].job == jobs.Job( + reference=jobs.JobReference(project_id="project_id_value") + ) + + +@pytest.mark.asyncio +async def test_submit_job_as_operation_flattened_error_async(): + client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.submit_job_as_operation( + jobs.SubmitJobRequest(), + project_id="project_id_value", + region="region_value", + job=jobs.Job(reference=jobs.JobReference(project_id="project_id_value")), + ) + + +def test_get_job(transport: str = "grpc", request_type=jobs.GetJobRequest): + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job( + driver_output_resource_uri="driver_output_resource_uri_value", + driver_control_files_uri="driver_control_files_uri_value", + job_uuid="job_uuid_value", + done=True, + hadoop_job=jobs.HadoopJob(main_jar_file_uri="main_jar_file_uri_value"), + ) + + response = client.get_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == jobs.GetJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, jobs.Job) + + assert response.driver_output_resource_uri == "driver_output_resource_uri_value" + + assert response.driver_control_files_uri == "driver_control_files_uri_value" + + assert response.job_uuid == "job_uuid_value" + + assert response.done is True + + +def test_get_job_from_dict(): + test_get_job(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_job_async(transport: str = "grpc_asyncio"): + client = JobControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = jobs.GetJobRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._client._transport.get_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + jobs.Job( + driver_output_resource_uri="driver_output_resource_uri_value", + driver_control_files_uri="driver_control_files_uri_value", + job_uuid="job_uuid_value", + done=True, + ) + ) + + response = await client.get_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, jobs.Job) + + assert response.driver_output_resource_uri == "driver_output_resource_uri_value" + + assert response.driver_control_files_uri == "driver_control_files_uri_value" + + assert response.job_uuid == "job_uuid_value" + + assert response.done is True + + +def test_get_job_flattened(): + client = JobControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_job( + project_id="project_id_value", region="region_value", job_id="job_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].job_id == "job_id_value" + + +def test_get_job_flattened_error(): + client = JobControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_job( + jobs.GetJobRequest(), + project_id="project_id_value", + region="region_value", + job_id="job_id_value", + ) + + +@pytest.mark.asyncio +async def test_get_job_flattened_async(): + client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client._client._transport.get_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_job( + project_id="project_id_value", region="region_value", job_id="job_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].job_id == "job_id_value" + + +@pytest.mark.asyncio +async def test_get_job_flattened_error_async(): + client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_job( + jobs.GetJobRequest(), + project_id="project_id_value", + region="region_value", + job_id="job_id_value", + ) + + +def test_list_jobs(transport: str = "grpc", request_type=jobs.ListJobsRequest): + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_jobs), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.ListJobsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == jobs.ListJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListJobsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_jobs_from_dict(): + test_list_jobs(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_jobs_async(transport: str = "grpc_asyncio"): + client = JobControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = jobs.ListJobsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_jobs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + jobs.ListJobsResponse(next_page_token="next_page_token_value",) + ) + + response = await client.list_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListJobsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_jobs_flattened(): + client = JobControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_jobs), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.ListJobsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_jobs( + project_id="project_id_value", region="region_value", filter="filter_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].filter == "filter_value" + + +def test_list_jobs_flattened_error(): + client = JobControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_jobs( + jobs.ListJobsRequest(), + project_id="project_id_value", + region="region_value", + filter="filter_value", + ) + + +@pytest.mark.asyncio +async def test_list_jobs_flattened_async(): + client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_jobs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.ListJobsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + jobs.ListJobsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_jobs( + project_id="project_id_value", region="region_value", filter="filter_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].filter == "filter_value" + + +@pytest.mark.asyncio +async def test_list_jobs_flattened_error_async(): + client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_jobs( + jobs.ListJobsRequest(), + project_id="project_id_value", + region="region_value", + filter="filter_value", + ) + + +def test_list_jobs_pager(): + client = JobControllerClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_jobs), "__call__") as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + jobs.ListJobsResponse( + jobs=[jobs.Job(), jobs.Job(), jobs.Job(),], next_page_token="abc", + ), + jobs.ListJobsResponse(jobs=[], next_page_token="def",), + jobs.ListJobsResponse(jobs=[jobs.Job(),], next_page_token="ghi",), + jobs.ListJobsResponse(jobs=[jobs.Job(), jobs.Job(),],), + RuntimeError, + ) + + metadata = () + pager = client.list_jobs(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, jobs.Job) for i in results) + + +def test_list_jobs_pages(): + client = JobControllerClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_jobs), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + jobs.ListJobsResponse( + jobs=[jobs.Job(), jobs.Job(), jobs.Job(),], next_page_token="abc", + ), + jobs.ListJobsResponse(jobs=[], next_page_token="def",), + jobs.ListJobsResponse(jobs=[jobs.Job(),], next_page_token="ghi",), + jobs.ListJobsResponse(jobs=[jobs.Job(), jobs.Job(),],), + RuntimeError, + ) + pages = list(client.list_jobs(request={}).pages) + for page, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_jobs_async_pager(): + client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_jobs), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + jobs.ListJobsResponse( + jobs=[jobs.Job(), jobs.Job(), jobs.Job(),], next_page_token="abc", + ), + jobs.ListJobsResponse(jobs=[], next_page_token="def",), + jobs.ListJobsResponse(jobs=[jobs.Job(),], next_page_token="ghi",), + jobs.ListJobsResponse(jobs=[jobs.Job(), jobs.Job(),],), + RuntimeError, + ) + async_pager = await client.list_jobs(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, jobs.Job) for i in responses) + + +@pytest.mark.asyncio +async def test_list_jobs_async_pages(): + client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_jobs), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + jobs.ListJobsResponse( + jobs=[jobs.Job(), jobs.Job(), jobs.Job(),], next_page_token="abc", + ), + jobs.ListJobsResponse(jobs=[], next_page_token="def",), + jobs.ListJobsResponse(jobs=[jobs.Job(),], next_page_token="ghi",), + jobs.ListJobsResponse(jobs=[jobs.Job(), jobs.Job(),],), + RuntimeError, + ) + pages = [] + async for page in (await client.list_jobs(request={})).pages: + pages.append(page) + for page, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page.raw_page.next_page_token == token + + +def test_update_job(transport: str = "grpc", request_type=jobs.UpdateJobRequest): + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.update_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job( + driver_output_resource_uri="driver_output_resource_uri_value", + driver_control_files_uri="driver_control_files_uri_value", + job_uuid="job_uuid_value", + done=True, + hadoop_job=jobs.HadoopJob(main_jar_file_uri="main_jar_file_uri_value"), + ) + + response = client.update_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == jobs.UpdateJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, jobs.Job) + + assert response.driver_output_resource_uri == "driver_output_resource_uri_value" + + assert response.driver_control_files_uri == "driver_control_files_uri_value" + + assert response.job_uuid == "job_uuid_value" + + assert response.done is True + + +def test_update_job_from_dict(): + test_update_job(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_job_async(transport: str = "grpc_asyncio"): + client = JobControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = jobs.UpdateJobRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + jobs.Job( + driver_output_resource_uri="driver_output_resource_uri_value", + driver_control_files_uri="driver_control_files_uri_value", + job_uuid="job_uuid_value", + done=True, + ) + ) + + response = await client.update_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, jobs.Job) + + assert response.driver_output_resource_uri == "driver_output_resource_uri_value" + + assert response.driver_control_files_uri == "driver_control_files_uri_value" + + assert response.job_uuid == "job_uuid_value" + + assert response.done is True + + +def test_cancel_job(transport: str = "grpc", request_type=jobs.CancelJobRequest): + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.cancel_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job( + driver_output_resource_uri="driver_output_resource_uri_value", + driver_control_files_uri="driver_control_files_uri_value", + job_uuid="job_uuid_value", + done=True, + hadoop_job=jobs.HadoopJob(main_jar_file_uri="main_jar_file_uri_value"), + ) + + response = client.cancel_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == jobs.CancelJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, jobs.Job) + + assert response.driver_output_resource_uri == "driver_output_resource_uri_value" + + assert response.driver_control_files_uri == "driver_control_files_uri_value" + + assert response.job_uuid == "job_uuid_value" + + assert response.done is True + + +def test_cancel_job_from_dict(): + test_cancel_job(request_type=dict) + + +@pytest.mark.asyncio +async def test_cancel_job_async(transport: str = "grpc_asyncio"): + client = JobControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = jobs.CancelJobRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.cancel_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + jobs.Job( + driver_output_resource_uri="driver_output_resource_uri_value", + driver_control_files_uri="driver_control_files_uri_value", + job_uuid="job_uuid_value", + done=True, + ) + ) + + response = await client.cancel_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, jobs.Job) + + assert response.driver_output_resource_uri == "driver_output_resource_uri_value" + + assert response.driver_control_files_uri == "driver_control_files_uri_value" + + assert response.job_uuid == "job_uuid_value" + + assert response.done is True + + +def test_cancel_job_flattened(): + client = JobControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.cancel_job), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = jobs.Job() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.cancel_job( + project_id="project_id_value", region="region_value", job_id="job_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].job_id == "job_id_value" + + +def test_cancel_job_flattened_error(): + client = JobControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.cancel_job( + jobs.CancelJobRequest(), + project_id="project_id_value", + region="region_value", + job_id="job_id_value", + ) + + +@pytest.mark.asyncio +async def test_cancel_job_flattened_async(): + client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.cancel_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.cancel_job( + project_id="project_id_value", region="region_value", job_id="job_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].job_id == "job_id_value" + + +@pytest.mark.asyncio +async def test_cancel_job_flattened_error_async(): + client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.cancel_job( + jobs.CancelJobRequest(), + project_id="project_id_value", + region="region_value", + job_id="job_id_value", + ) + + +def test_delete_job(transport: str = "grpc", request_type=jobs.DeleteJobRequest): + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.delete_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == jobs.DeleteJobRequest() + + # Establish that the response is the type that we expect. 
+ assert response is None + + +def test_delete_job_from_dict(): + test_delete_job(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_job_async(transport: str = "grpc_asyncio"): + client = JobControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = jobs.DeleteJobRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_job_flattened(): + client = JobControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.delete_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_job( + project_id="project_id_value", region="region_value", job_id="job_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].job_id == "job_id_value" + + +def test_delete_job_flattened_error(): + client = JobControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_job( + jobs.DeleteJobRequest(), + project_id="project_id_value", + region="region_value", + job_id="job_id_value", + ) + + +@pytest.mark.asyncio +async def test_delete_job_flattened_async(): + client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_job( + project_id="project_id_value", region="region_value", job_id="job_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].job_id == "job_id_value" + + +@pytest.mark.asyncio +async def test_delete_job_flattened_error_async(): + client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_job( + jobs.DeleteJobRequest(), + project_id="project_id_value", + region="region_value", + job_id="job_id_value", + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.JobControllerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.JobControllerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = JobControllerClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.JobControllerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = JobControllerClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.JobControllerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = JobControllerClient(transport=transport) + assert client._transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.JobControllerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.JobControllerGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = JobControllerClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client._transport, transports.JobControllerGrpcTransport,) + + +def test_job_controller_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.JobControllerTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_job_controller_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.dataproc_v1.services.job_controller.transports.JobControllerTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.JobControllerTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + "submit_job", + "submit_job_as_operation", + "get_job", + "list_jobs", + "update_job", + "cancel_job", + "delete_job", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +def test_job_controller_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.dataproc_v1.services.job_controller.transports.JobControllerTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.JobControllerTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_job_controller_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + JobControllerClient() + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +def test_job_controller_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.JobControllerGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_job_controller_host_no_port(): + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="dataproc.googleapis.com" + ), + ) + assert client._transport._host == "dataproc.googleapis.com:443" + + +def test_job_controller_host_with_port(): + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="dataproc.googleapis.com:8000" + ), + ) + assert client._transport._host == "dataproc.googleapis.com:8000" + + +def test_job_controller_grpc_transport_channel(): + channel = grpc.insecure_channel("http://localhost/") + + # Check that if channel is provided, mtls endpoint and client_cert_source + # won't be used. + callback = mock.MagicMock() + transport = transports.JobControllerGrpcTransport( + host="squid.clam.whelk", + channel=channel, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=callback, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert not callback.called + + +def test_job_controller_grpc_asyncio_transport_channel(): + channel = aio.insecure_channel("http://localhost/") + + # Check that if channel is provided, mtls endpoint and client_cert_source + # won't be used. 
+ callback = mock.MagicMock() + transport = transports.JobControllerGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=callback, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert not callback.called + + +@mock.patch("grpc.ssl_channel_credentials", autospec=True) +@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True) +def test_job_controller_grpc_transport_channel_mtls_with_client_cert_source( + grpc_create_channel, grpc_ssl_channel_cred +): + # Check that if channel is None, but api_mtls_endpoint and client_cert_source + # are provided, then a mTLS channel will be created. + mock_cred = mock.Mock() + + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + transport = transports.JobControllerGrpcTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@mock.patch("grpc.ssl_channel_credentials", autospec=True) +@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True) +def test_job_controller_grpc_asyncio_transport_channel_mtls_with_client_cert_source( + grpc_create_channel, grpc_ssl_channel_cred +): + # Check that if channel is None, but api_mtls_endpoint and client_cert_source + # are provided, then a mTLS channel will be created. + mock_cred = mock.Mock() + + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + transport = transports.JobControllerGrpcAsyncIOTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"] +) +@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True) +def test_job_controller_grpc_transport_channel_mtls_with_adc( + grpc_create_channel, api_mtls_endpoint +): + # Check that if channel and client_cert_source are None, but api_mtls_endpoint + # is provided, then a mTLS channel will be created with SSL ADC. + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + # Mock google.auth.transport.grpc.SslCredentials class. 
+ mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + mock_cred = mock.Mock() + transport = transports.JobControllerGrpcTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint=api_mtls_endpoint, + client_cert_source=None, + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"] +) +@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True) +def test_job_controller_grpc_asyncio_transport_channel_mtls_with_adc( + grpc_create_channel, api_mtls_endpoint +): + # Check that if channel and client_cert_source are None, but api_mtls_endpoint + # is provided, then a mTLS channel will be created with SSL ADC. + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + # Mock google.auth.transport.grpc.SslCredentials class. + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + mock_cred = mock.Mock() + transport = transports.JobControllerGrpcAsyncIOTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint=api_mtls_endpoint, + client_cert_source=None, + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_job_controller_grpc_lro_client(): + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + transport = client._transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_job_controller_grpc_lro_async_client(): + client = JobControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + transport = client._client._transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client diff --git a/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py b/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py new file mode 100644 index 00000000..c88ec4ab --- /dev/null +++ b/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py @@ -0,0 +1,2488 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async +from google.api_core import operations_v1 +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.dataproc_v1.services.workflow_template_service import ( + WorkflowTemplateServiceAsyncClient, +) +from google.cloud.dataproc_v1.services.workflow_template_service import ( + WorkflowTemplateServiceClient, +) +from google.cloud.dataproc_v1.services.workflow_template_service import pagers +from google.cloud.dataproc_v1.services.workflow_template_service import transports +from google.cloud.dataproc_v1.types import clusters +from google.cloud.dataproc_v1.types import clusters as gcd_clusters +from google.cloud.dataproc_v1.types import jobs +from google.cloud.dataproc_v1.types import jobs as gcd_jobs +from google.cloud.dataproc_v1.types import shared +from google.cloud.dataproc_v1.types import workflow_templates +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import duration_pb2 as duration # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert WorkflowTemplateServiceClient._get_default_mtls_endpoint(None) is None + assert ( + WorkflowTemplateServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + WorkflowTemplateServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + WorkflowTemplateServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + WorkflowTemplateServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + WorkflowTemplateServiceClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", [WorkflowTemplateServiceClient, WorkflowTemplateServiceAsyncClient] +) +def test_workflow_template_service_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client._transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client._transport._credentials == creds + + assert client._transport._host == "dataproc.googleapis.com:443" + + +def test_workflow_template_service_client_get_transport_class(): + transport = WorkflowTemplateServiceClient.get_transport_class() + assert transport == transports.WorkflowTemplateServiceGrpcTransport + + transport = WorkflowTemplateServiceClient.get_transport_class("grpc") + assert transport == transports.WorkflowTemplateServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + WorkflowTemplateServiceClient, + transports.WorkflowTemplateServiceGrpcTransport, + "grpc", + ), + ( + WorkflowTemplateServiceAsyncClient, + transports.WorkflowTemplateServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + WorkflowTemplateServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(WorkflowTemplateServiceClient), +) +@mock.patch.object( + WorkflowTemplateServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(WorkflowTemplateServiceAsyncClient), +) +def test_workflow_template_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(WorkflowTemplateServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(WorkflowTemplateServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + api_mtls_endpoint="squid.clam.whelk", + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", and client_cert_source is provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=client_cert_source_callback, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", and default_client_cert_source is provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", but client_cert_source and default_client_cert_source are None. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + WorkflowTemplateServiceClient, + transports.WorkflowTemplateServiceGrpcTransport, + "grpc", + ), + ( + WorkflowTemplateServiceAsyncClient, + transports.WorkflowTemplateServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_workflow_template_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + WorkflowTemplateServiceClient, + transports.WorkflowTemplateServiceGrpcTransport, + "grpc", + ), + ( + WorkflowTemplateServiceAsyncClient, + transports.WorkflowTemplateServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_workflow_template_service_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + +def test_workflow_template_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.dataproc_v1.services.workflow_template_service.transports.WorkflowTemplateServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = WorkflowTemplateServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + api_mtls_endpoint="squid.clam.whelk", + client_cert_source=None, + quota_project_id=None, + ) + + +def test_create_workflow_template( + transport: str = "grpc", + request_type=workflow_templates.CreateWorkflowTemplateRequest, +): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate( + id="id_value", name="name_value", version=774, + ) + + response = client.create_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == workflow_templates.CreateWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, workflow_templates.WorkflowTemplate) + + assert response.id == "id_value" + + assert response.name == "name_value" + + assert response.version == 774 + + +def test_create_workflow_template_from_dict(): + test_create_workflow_template(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_workflow_template_async(transport: str = "grpc_asyncio"): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = workflow_templates.CreateWorkflowTemplateRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + workflow_templates.WorkflowTemplate( + id="id_value", name="name_value", version=774, + ) + ) + + response = await client.create_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, workflow_templates.WorkflowTemplate) + + assert response.id == "id_value" + + assert response.name == "name_value" + + assert response.version == 774 + + +def test_create_workflow_template_field_headers(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.CreateWorkflowTemplateRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_workflow_template), "__call__" + ) as call: + call.return_value = workflow_templates.WorkflowTemplate() + + client.create_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_workflow_template_field_headers_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.CreateWorkflowTemplateRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_workflow_template), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + workflow_templates.WorkflowTemplate() + ) + + await client.create_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_workflow_template_flattened(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_workflow_template( + parent="parent_value", + template=workflow_templates.WorkflowTemplate(id="id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].template == workflow_templates.WorkflowTemplate(id="id_value") + + +def test_create_workflow_template_flattened_error(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_workflow_template( + workflow_templates.CreateWorkflowTemplateRequest(), + parent="parent_value", + template=workflow_templates.WorkflowTemplate(id="id_value"), + ) + + +@pytest.mark.asyncio +async def test_create_workflow_template_flattened_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + workflow_templates.WorkflowTemplate() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_workflow_template( + parent="parent_value", + template=workflow_templates.WorkflowTemplate(id="id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].template == workflow_templates.WorkflowTemplate(id="id_value") + + +@pytest.mark.asyncio +async def test_create_workflow_template_flattened_error_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_workflow_template( + workflow_templates.CreateWorkflowTemplateRequest(), + parent="parent_value", + template=workflow_templates.WorkflowTemplate(id="id_value"), + ) + + +def test_get_workflow_template( + transport: str = "grpc", request_type=workflow_templates.GetWorkflowTemplateRequest +): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate( + id="id_value", name="name_value", version=774, + ) + + response = client.get_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == workflow_templates.GetWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, workflow_templates.WorkflowTemplate) + + assert response.id == "id_value" + + assert response.name == "name_value" + + assert response.version == 774 + + +def test_get_workflow_template_from_dict(): + test_get_workflow_template(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_workflow_template_async(transport: str = "grpc_asyncio"): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = workflow_templates.GetWorkflowTemplateRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + workflow_templates.WorkflowTemplate( + id="id_value", name="name_value", version=774, + ) + ) + + response = await client.get_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, workflow_templates.WorkflowTemplate) + + assert response.id == "id_value" + + assert response.name == "name_value" + + assert response.version == 774 + + +def test_get_workflow_template_field_headers(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.GetWorkflowTemplateRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_workflow_template), "__call__" + ) as call: + call.return_value = workflow_templates.WorkflowTemplate() + + client.get_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_workflow_template_field_headers_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.GetWorkflowTemplateRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_workflow_template), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + workflow_templates.WorkflowTemplate() + ) + + await client.get_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_workflow_template_flattened(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_workflow_template(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_workflow_template_flattened_error(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_workflow_template( + workflow_templates.GetWorkflowTemplateRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_workflow_template_flattened_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + workflow_templates.WorkflowTemplate() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_workflow_template(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_workflow_template_flattened_error_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_workflow_template( + workflow_templates.GetWorkflowTemplateRequest(), name="name_value", + ) + + +def test_instantiate_workflow_template( + transport: str = "grpc", + request_type=workflow_templates.InstantiateWorkflowTemplateRequest, +): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.instantiate_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.instantiate_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == workflow_templates.InstantiateWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_instantiate_workflow_template_from_dict(): + test_instantiate_workflow_template(request_type=dict) + + +@pytest.mark.asyncio +async def test_instantiate_workflow_template_async(transport: str = "grpc_asyncio"): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = workflow_templates.InstantiateWorkflowTemplateRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.instantiate_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.instantiate_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_instantiate_workflow_template_field_headers(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.InstantiateWorkflowTemplateRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.instantiate_workflow_template), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.instantiate_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_instantiate_workflow_template_field_headers_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.InstantiateWorkflowTemplateRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.instantiate_workflow_template), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.instantiate_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_instantiate_workflow_template_flattened(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.instantiate_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.instantiate_workflow_template( + name="name_value", parameters={"key_value": "value_value"}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].parameters == {"key_value": "value_value"} + + +def test_instantiate_workflow_template_flattened_error(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.instantiate_workflow_template( + workflow_templates.InstantiateWorkflowTemplateRequest(), + name="name_value", + parameters={"key_value": "value_value"}, + ) + + +@pytest.mark.asyncio +async def test_instantiate_workflow_template_flattened_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.instantiate_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.instantiate_workflow_template( + name="name_value", parameters={"key_value": "value_value"}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].parameters == {"key_value": "value_value"} + + +@pytest.mark.asyncio +async def test_instantiate_workflow_template_flattened_error_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.instantiate_workflow_template( + workflow_templates.InstantiateWorkflowTemplateRequest(), + name="name_value", + parameters={"key_value": "value_value"}, + ) + + +def test_instantiate_inline_workflow_template( + transport: str = "grpc", + request_type=workflow_templates.InstantiateInlineWorkflowTemplateRequest, +): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.instantiate_inline_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.instantiate_inline_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == workflow_templates.InstantiateInlineWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_instantiate_inline_workflow_template_from_dict(): + test_instantiate_inline_workflow_template(request_type=dict) + + +@pytest.mark.asyncio +async def test_instantiate_inline_workflow_template_async( + transport: str = "grpc_asyncio", +): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = workflow_templates.InstantiateInlineWorkflowTemplateRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.instantiate_inline_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.instantiate_inline_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_instantiate_inline_workflow_template_field_headers(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.InstantiateInlineWorkflowTemplateRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.instantiate_inline_workflow_template), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.instantiate_inline_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_instantiate_inline_workflow_template_field_headers_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.InstantiateInlineWorkflowTemplateRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.instantiate_inline_workflow_template), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.instantiate_inline_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_instantiate_inline_workflow_template_flattened(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.instantiate_inline_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.instantiate_inline_workflow_template( + parent="parent_value", + template=workflow_templates.WorkflowTemplate(id="id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].template == workflow_templates.WorkflowTemplate(id="id_value") + + +def test_instantiate_inline_workflow_template_flattened_error(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.instantiate_inline_workflow_template( + workflow_templates.InstantiateInlineWorkflowTemplateRequest(), + parent="parent_value", + template=workflow_templates.WorkflowTemplate(id="id_value"), + ) + + +@pytest.mark.asyncio +async def test_instantiate_inline_workflow_template_flattened_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.instantiate_inline_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.instantiate_inline_workflow_template( + parent="parent_value", + template=workflow_templates.WorkflowTemplate(id="id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].template == workflow_templates.WorkflowTemplate(id="id_value") + + +@pytest.mark.asyncio +async def test_instantiate_inline_workflow_template_flattened_error_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.instantiate_inline_workflow_template( + workflow_templates.InstantiateInlineWorkflowTemplateRequest(), + parent="parent_value", + template=workflow_templates.WorkflowTemplate(id="id_value"), + ) + + +def test_update_workflow_template( + transport: str = "grpc", + request_type=workflow_templates.UpdateWorkflowTemplateRequest, +): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.update_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate( + id="id_value", name="name_value", version=774, + ) + + response = client.update_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == workflow_templates.UpdateWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, workflow_templates.WorkflowTemplate) + + assert response.id == "id_value" + + assert response.name == "name_value" + + assert response.version == 774 + + +def test_update_workflow_template_from_dict(): + test_update_workflow_template(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_workflow_template_async(transport: str = "grpc_asyncio"): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = workflow_templates.UpdateWorkflowTemplateRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + workflow_templates.WorkflowTemplate( + id="id_value", name="name_value", version=774, + ) + ) + + response = await client.update_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, workflow_templates.WorkflowTemplate) + + assert response.id == "id_value" + + assert response.name == "name_value" + + assert response.version == 774 + + +def test_update_workflow_template_field_headers(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.UpdateWorkflowTemplateRequest() + request.template.name = "template.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.update_workflow_template), "__call__" + ) as call: + call.return_value = workflow_templates.WorkflowTemplate() + + client.update_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "template.name=template.name/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_update_workflow_template_field_headers_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.UpdateWorkflowTemplateRequest() + request.template.name = "template.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_workflow_template), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + workflow_templates.WorkflowTemplate() + ) + + await client.update_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "template.name=template.name/value",) in kw[ + "metadata" + ] + + +def test_update_workflow_template_flattened(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.update_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_workflow_template( + template=workflow_templates.WorkflowTemplate(id="id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].template == workflow_templates.WorkflowTemplate(id="id_value") + + +def test_update_workflow_template_flattened_error(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_workflow_template( + workflow_templates.UpdateWorkflowTemplateRequest(), + template=workflow_templates.WorkflowTemplate(id="id_value"), + ) + + +@pytest.mark.asyncio +async def test_update_workflow_template_flattened_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + workflow_templates.WorkflowTemplate() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_workflow_template( + template=workflow_templates.WorkflowTemplate(id="id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].template == workflow_templates.WorkflowTemplate(id="id_value") + + +@pytest.mark.asyncio +async def test_update_workflow_template_flattened_error_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_workflow_template( + workflow_templates.UpdateWorkflowTemplateRequest(), + template=workflow_templates.WorkflowTemplate(id="id_value"), + ) + + +def test_list_workflow_templates( + transport: str = "grpc", + request_type=workflow_templates.ListWorkflowTemplatesRequest, +): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_workflow_templates), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.ListWorkflowTemplatesResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_workflow_templates(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == workflow_templates.ListWorkflowTemplatesRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListWorkflowTemplatesPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_workflow_templates_from_dict(): + test_list_workflow_templates(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_workflow_templates_async(transport: str = "grpc_asyncio"): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = workflow_templates.ListWorkflowTemplatesRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_workflow_templates), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + workflow_templates.ListWorkflowTemplatesResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_workflow_templates(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListWorkflowTemplatesAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_workflow_templates_field_headers(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.ListWorkflowTemplatesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_workflow_templates), "__call__" + ) as call: + call.return_value = workflow_templates.ListWorkflowTemplatesResponse() + + client.list_workflow_templates(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_workflow_templates_field_headers_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.ListWorkflowTemplatesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_workflow_templates), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + workflow_templates.ListWorkflowTemplatesResponse() + ) + + await client.list_workflow_templates(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
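+    # The routing parameter is carried in the x-goog-request-params entry of the
+    # call metadata, keyed on the request's parent field.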
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_workflow_templates_flattened(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_workflow_templates), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.ListWorkflowTemplatesResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_workflow_templates(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_workflow_templates_flattened_error(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_workflow_templates( + workflow_templates.ListWorkflowTemplatesRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_workflow_templates_flattened_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_workflow_templates), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.ListWorkflowTemplatesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + workflow_templates.ListWorkflowTemplatesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_workflow_templates(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_workflow_templates_flattened_error_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_workflow_templates( + workflow_templates.ListWorkflowTemplatesRequest(), parent="parent_value", + ) + + +def test_list_workflow_templates_pager(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_workflow_templates), "__call__" + ) as call: + # Set the response to a series of pages. 
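+        # side_effect hands back one response per page request; the trailing
+        # RuntimeError makes the test fail fast if the pager ever asks for a
+        # page beyond those designated here.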
+ call.side_effect = ( + workflow_templates.ListWorkflowTemplatesResponse( + templates=[ + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + ], + next_page_token="abc", + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[], next_page_token="def", + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[workflow_templates.WorkflowTemplate(),], + next_page_token="ghi", + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[ + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_workflow_templates(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, workflow_templates.WorkflowTemplate) for i in results) + + +def test_list_workflow_templates_pages(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_workflow_templates), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + workflow_templates.ListWorkflowTemplatesResponse( + templates=[ + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + ], + next_page_token="abc", + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[], next_page_token="def", + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[workflow_templates.WorkflowTemplate(),], + next_page_token="ghi", + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[ + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + ], + ), + RuntimeError, + ) + pages = list(client.list_workflow_templates(request={}).pages) + for page, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_workflow_templates_async_pager(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_workflow_templates), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + workflow_templates.ListWorkflowTemplatesResponse( + templates=[ + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + ], + next_page_token="abc", + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[], next_page_token="def", + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[workflow_templates.WorkflowTemplate(),], + next_page_token="ghi", + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[ + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_workflow_templates(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all( + isinstance(i, workflow_templates.WorkflowTemplate) for i in responses + ) + + +@pytest.mark.asyncio +async def test_list_workflow_templates_async_pages(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_workflow_templates), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + workflow_templates.ListWorkflowTemplatesResponse( + templates=[ + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + ], + next_page_token="abc", + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[], next_page_token="def", + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[workflow_templates.WorkflowTemplate(),], + next_page_token="ghi", + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[ + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + ], + ), + RuntimeError, + ) + pages = [] + async for page in (await client.list_workflow_templates(request={})).pages: + pages.append(page) + for page, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page.raw_page.next_page_token == token + + +def test_delete_workflow_template( + transport: str = "grpc", + request_type=workflow_templates.DeleteWorkflowTemplateRequest, +): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == workflow_templates.DeleteWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. 
+ assert response is None + + +def test_delete_workflow_template_from_dict(): + test_delete_workflow_template(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_workflow_template_async(transport: str = "grpc_asyncio"): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = workflow_templates.DeleteWorkflowTemplateRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_workflow_template_field_headers(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.DeleteWorkflowTemplateRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_workflow_template), "__call__" + ) as call: + call.return_value = None + + client.delete_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_workflow_template_field_headers_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.DeleteWorkflowTemplateRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_workflow_template), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.delete_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_workflow_template_flattened(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
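+        # delete_workflow_template returns no payload (Empty on the wire), so
+        # the designated value is simply None.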
+ call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_workflow_template(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_workflow_template_flattened_error(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_workflow_template( + workflow_templates.DeleteWorkflowTemplateRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_workflow_template_flattened_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_workflow_template(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_workflow_template_flattened_error_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_workflow_template( + workflow_templates.DeleteWorkflowTemplateRequest(), name="name_value", + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.WorkflowTemplateServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.WorkflowTemplateServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = WorkflowTemplateServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.WorkflowTemplateServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = WorkflowTemplateServiceClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.WorkflowTemplateServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = WorkflowTemplateServiceClient(transport=transport) + assert client._transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.WorkflowTemplateServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.WorkflowTemplateServiceGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + assert isinstance( + client._transport, transports.WorkflowTemplateServiceGrpcTransport, + ) + + +def test_workflow_template_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.WorkflowTemplateServiceTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_workflow_template_service_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.dataproc_v1.services.workflow_template_service.transports.WorkflowTemplateServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.WorkflowTemplateServiceTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "create_workflow_template", + "get_workflow_template", + "instantiate_workflow_template", + "instantiate_inline_workflow_template", + "update_workflow_template", + "list_workflow_templates", + "delete_workflow_template", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +def test_workflow_template_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.dataproc_v1.services.workflow_template_service.transports.WorkflowTemplateServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.WorkflowTemplateServiceTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_workflow_template_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
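+    # Patch google.auth.default so that constructing the client exercises the
+    # ADC code path without needing real credentials in the environment.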
+ with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + WorkflowTemplateServiceClient() + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +def test_workflow_template_service_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.WorkflowTemplateServiceGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_workflow_template_service_host_no_port(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="dataproc.googleapis.com" + ), + ) + assert client._transport._host == "dataproc.googleapis.com:443" + + +def test_workflow_template_service_host_with_port(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="dataproc.googleapis.com:8000" + ), + ) + assert client._transport._host == "dataproc.googleapis.com:8000" + + +def test_workflow_template_service_grpc_transport_channel(): + channel = grpc.insecure_channel("http://localhost/") + + # Check that if channel is provided, mtls endpoint and client_cert_source + # won't be used. + callback = mock.MagicMock() + transport = transports.WorkflowTemplateServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=callback, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert not callback.called + + +def test_workflow_template_service_grpc_asyncio_transport_channel(): + channel = aio.insecure_channel("http://localhost/") + + # Check that if channel is provided, mtls endpoint and client_cert_source + # won't be used. + callback = mock.MagicMock() + transport = transports.WorkflowTemplateServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=callback, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert not callback.called + + +@mock.patch("grpc.ssl_channel_credentials", autospec=True) +@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True) +def test_workflow_template_service_grpc_transport_channel_mtls_with_client_cert_source( + grpc_create_channel, grpc_ssl_channel_cred +): + # Check that if channel is None, but api_mtls_endpoint and client_cert_source + # are provided, then a mTLS channel will be created. 
+ mock_cred = mock.Mock() + + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + transport = transports.WorkflowTemplateServiceGrpcTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@mock.patch("grpc.ssl_channel_credentials", autospec=True) +@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True) +def test_workflow_template_service_grpc_asyncio_transport_channel_mtls_with_client_cert_source( + grpc_create_channel, grpc_ssl_channel_cred +): + # Check that if channel is None, but api_mtls_endpoint and client_cert_source + # are provided, then a mTLS channel will be created. + mock_cred = mock.Mock() + + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + transport = transports.WorkflowTemplateServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"] +) +@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True) +def test_workflow_template_service_grpc_transport_channel_mtls_with_adc( + grpc_create_channel, api_mtls_endpoint +): + # Check that if channel and client_cert_source are None, but api_mtls_endpoint + # is provided, then a mTLS channel will be created with SSL ADC. + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + # Mock google.auth.transport.grpc.SslCredentials class. 
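+    # Patching SslCredentials stands in for the application-default SSL
+    # credentials so that no real mTLS material is required by the test.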
+ mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + mock_cred = mock.Mock() + transport = transports.WorkflowTemplateServiceGrpcTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint=api_mtls_endpoint, + client_cert_source=None, + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"] +) +@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True) +def test_workflow_template_service_grpc_asyncio_transport_channel_mtls_with_adc( + grpc_create_channel, api_mtls_endpoint +): + # Check that if channel and client_cert_source are None, but api_mtls_endpoint + # is provided, then a mTLS channel will be created with SSL ADC. + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + # Mock google.auth.transport.grpc.SslCredentials class. + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + mock_cred = mock.Mock() + transport = transports.WorkflowTemplateServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint=api_mtls_endpoint, + client_cert_source=None, + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_workflow_template_service_grpc_lro_client(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + transport = client._transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_workflow_template_service_grpc_lro_async_client(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + transport = client._client._transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,) + + # Ensure that subsequent calls to the property send the exact same object. 
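+    # In other words, the operations client is cached on the transport rather
+    # than re-created on every property access.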
+ assert transport.operations_client is transport.operations_client + + +def test_workflow_template_path(): + project = "squid" + region = "clam" + workflow_template = "whelk" + + expected = "projects/{project}/regions/{region}/workflowTemplates/{workflow_template}".format( + project=project, region=region, workflow_template=workflow_template, + ) + actual = WorkflowTemplateServiceClient.workflow_template_path( + project, region, workflow_template + ) + assert expected == actual + + +def test_parse_workflow_template_path(): + expected = { + "project": "octopus", + "region": "oyster", + "workflow_template": "nudibranch", + } + path = WorkflowTemplateServiceClient.workflow_template_path(**expected) + + # Check that the path construction is reversible. + actual = WorkflowTemplateServiceClient.parse_workflow_template_path(path) + assert expected == actual diff --git a/tests/unit/gapic/dataproc_v1beta2/__init__.py b/tests/unit/gapic/dataproc_v1beta2/__init__.py new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/tests/unit/gapic/dataproc_v1beta2/__init__.py @@ -0,0 +1 @@ + diff --git a/tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py b/tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py new file mode 100644 index 00000000..cfe27251 --- /dev/null +++ b/tests/unit/gapic/dataproc_v1beta2/test_autoscaling_policy_service.py @@ -0,0 +1,2015 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service import ( + AutoscalingPolicyServiceAsyncClient, +) +from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service import ( + AutoscalingPolicyServiceClient, +) +from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service import pagers +from google.cloud.dataproc_v1beta2.services.autoscaling_policy_service import transports +from google.cloud.dataproc_v1beta2.types import autoscaling_policies +from google.oauth2 import service_account +from google.protobuf import duration_pb2 as duration # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert AutoscalingPolicyServiceClient._get_default_mtls_endpoint(None) is None + assert ( + AutoscalingPolicyServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + AutoscalingPolicyServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + AutoscalingPolicyServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + AutoscalingPolicyServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + AutoscalingPolicyServiceClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", + [AutoscalingPolicyServiceClient, AutoscalingPolicyServiceAsyncClient], +) +def test_autoscaling_policy_service_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client._transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client._transport._credentials == creds + + assert client._transport._host == "dataproc.googleapis.com:443" + + +def test_autoscaling_policy_service_client_get_transport_class(): + transport = AutoscalingPolicyServiceClient.get_transport_class() + assert transport == transports.AutoscalingPolicyServiceGrpcTransport + + transport = AutoscalingPolicyServiceClient.get_transport_class("grpc") + assert transport == transports.AutoscalingPolicyServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + AutoscalingPolicyServiceClient, + transports.AutoscalingPolicyServiceGrpcTransport, + "grpc", + ), + ( + AutoscalingPolicyServiceAsyncClient, + transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + AutoscalingPolicyServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(AutoscalingPolicyServiceClient), +) +@mock.patch.object( + AutoscalingPolicyServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(AutoscalingPolicyServiceAsyncClient), +) +def test_autoscaling_policy_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object( + AutoscalingPolicyServiceClient, "get_transport_class" + ) as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object( + AutoscalingPolicyServiceClient, "get_transport_class" + ) as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
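+    # An explicit api_endpoint in client_options should be handed to the
+    # transport unchanged as its host.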
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + api_mtls_endpoint="squid.clam.whelk", + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", and client_cert_source is provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=client_cert_source_callback, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", and default_client_cert_source is provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", but client_cert_source and default_client_cert_source are None. 
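+    # With no client certificate available from either source, the client is
+    # expected to fall back to the regular (non-mTLS) endpoint.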
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + AutoscalingPolicyServiceClient, + transports.AutoscalingPolicyServiceGrpcTransport, + "grpc", + ), + ( + AutoscalingPolicyServiceAsyncClient, + transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_autoscaling_policy_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + AutoscalingPolicyServiceClient, + transports.AutoscalingPolicyServiceGrpcTransport, + "grpc", + ), + ( + AutoscalingPolicyServiceAsyncClient, + transports.AutoscalingPolicyServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_autoscaling_policy_service_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + +def test_autoscaling_policy_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = AutoscalingPolicyServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + api_mtls_endpoint="squid.clam.whelk", + client_cert_source=None, + quota_project_id=None, + ) + + +def test_create_autoscaling_policy( + transport: str = "grpc", + request_type=autoscaling_policies.CreateAutoscalingPolicyRequest, +): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy( + id="id_value", + name="name_value", + basic_algorithm=autoscaling_policies.BasicAutoscalingAlgorithm( + yarn_config=autoscaling_policies.BasicYarnAutoscalingConfig( + graceful_decommission_timeout=duration.Duration(seconds=751) + ) + ), + ) + + response = client.create_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == autoscaling_policies.CreateAutoscalingPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, autoscaling_policies.AutoscalingPolicy) + + assert response.id == "id_value" + + assert response.name == "name_value" + + +def test_create_autoscaling_policy_from_dict(): + test_create_autoscaling_policy(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_autoscaling_policy_async(transport: str = "grpc_asyncio"): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = autoscaling_policies.CreateAutoscalingPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + autoscaling_policies.AutoscalingPolicy(id="id_value", name="name_value",) + ) + + response = await client.create_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, autoscaling_policies.AutoscalingPolicy) + + assert response.id == "id_value" + + assert response.name == "name_value" + + +def test_create_autoscaling_policy_field_headers(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.CreateAutoscalingPolicyRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_autoscaling_policy), "__call__" + ) as call: + call.return_value = autoscaling_policies.AutoscalingPolicy() + + client.create_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_autoscaling_policy_field_headers_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.CreateAutoscalingPolicyRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_autoscaling_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + autoscaling_policies.AutoscalingPolicy() + ) + + await client.create_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_autoscaling_policy_flattened(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_autoscaling_policy( + parent="parent_value", + policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id="id_value") + + +def test_create_autoscaling_policy_flattened_error(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_autoscaling_policy( + autoscaling_policies.CreateAutoscalingPolicyRequest(), + parent="parent_value", + policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + ) + + +@pytest.mark.asyncio +async def test_create_autoscaling_policy_flattened_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + autoscaling_policies.AutoscalingPolicy() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_autoscaling_policy( + parent="parent_value", + policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id="id_value") + + +@pytest.mark.asyncio +async def test_create_autoscaling_policy_flattened_error_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_autoscaling_policy( + autoscaling_policies.CreateAutoscalingPolicyRequest(), + parent="parent_value", + policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + ) + + +def test_update_autoscaling_policy( + transport: str = "grpc", + request_type=autoscaling_policies.UpdateAutoscalingPolicyRequest, +): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.update_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy( + id="id_value", + name="name_value", + basic_algorithm=autoscaling_policies.BasicAutoscalingAlgorithm( + yarn_config=autoscaling_policies.BasicYarnAutoscalingConfig( + graceful_decommission_timeout=duration.Duration(seconds=751) + ) + ), + ) + + response = client.update_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. 
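+        # The empty request sent above should arrive at the stub as a
+        # default-constructed UpdateAutoscalingPolicyRequest.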
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == autoscaling_policies.UpdateAutoscalingPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, autoscaling_policies.AutoscalingPolicy) + + assert response.id == "id_value" + + assert response.name == "name_value" + + +def test_update_autoscaling_policy_from_dict(): + test_update_autoscaling_policy(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_autoscaling_policy_async(transport: str = "grpc_asyncio"): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = autoscaling_policies.UpdateAutoscalingPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + autoscaling_policies.AutoscalingPolicy(id="id_value", name="name_value",) + ) + + response = await client.update_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, autoscaling_policies.AutoscalingPolicy) + + assert response.id == "id_value" + + assert response.name == "name_value" + + +def test_update_autoscaling_policy_field_headers(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.UpdateAutoscalingPolicyRequest() + request.policy.name = "policy.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.update_autoscaling_policy), "__call__" + ) as call: + call.return_value = autoscaling_policies.AutoscalingPolicy() + + client.update_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "policy.name=policy.name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_autoscaling_policy_field_headers_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.UpdateAutoscalingPolicyRequest() + request.policy.name = "policy.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_autoscaling_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + autoscaling_policies.AutoscalingPolicy() + ) + + await client.update_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "policy.name=policy.name/value",) in kw["metadata"] + + +def test_update_autoscaling_policy_flattened(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.update_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_autoscaling_policy( + policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id="id_value") + + +def test_update_autoscaling_policy_flattened_error(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_autoscaling_policy( + autoscaling_policies.UpdateAutoscalingPolicyRequest(), + policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + ) + + +@pytest.mark.asyncio +async def test_update_autoscaling_policy_flattened_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + autoscaling_policies.AutoscalingPolicy() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_autoscaling_policy( + policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].policy == autoscaling_policies.AutoscalingPolicy(id="id_value") + + +@pytest.mark.asyncio +async def test_update_autoscaling_policy_flattened_error_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_autoscaling_policy( + autoscaling_policies.UpdateAutoscalingPolicyRequest(), + policy=autoscaling_policies.AutoscalingPolicy(id="id_value"), + ) + + +def test_get_autoscaling_policy( + transport: str = "grpc", + request_type=autoscaling_policies.GetAutoscalingPolicyRequest, +): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy( + id="id_value", + name="name_value", + basic_algorithm=autoscaling_policies.BasicAutoscalingAlgorithm( + yarn_config=autoscaling_policies.BasicYarnAutoscalingConfig( + graceful_decommission_timeout=duration.Duration(seconds=751) + ) + ), + ) + + response = client.get_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == autoscaling_policies.GetAutoscalingPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, autoscaling_policies.AutoscalingPolicy) + + assert response.id == "id_value" + + assert response.name == "name_value" + + +def test_get_autoscaling_policy_from_dict(): + test_get_autoscaling_policy(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_autoscaling_policy_async(transport: str = "grpc_asyncio"): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = autoscaling_policies.GetAutoscalingPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + autoscaling_policies.AutoscalingPolicy(id="id_value", name="name_value",) + ) + + response = await client.get_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, autoscaling_policies.AutoscalingPolicy) + + assert response.id == "id_value" + + assert response.name == "name_value" + + +def test_get_autoscaling_policy_field_headers(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.GetAutoscalingPolicyRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client._transport.get_autoscaling_policy), "__call__" + ) as call: + call.return_value = autoscaling_policies.AutoscalingPolicy() + + client.get_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_autoscaling_policy_field_headers_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.GetAutoscalingPolicyRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_autoscaling_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + autoscaling_policies.AutoscalingPolicy() + ) + + await client.get_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_autoscaling_policy_flattened(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_autoscaling_policy(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_autoscaling_policy_flattened_error(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_autoscaling_policy( + autoscaling_policies.GetAutoscalingPolicyRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_autoscaling_policy_flattened_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.AutoscalingPolicy() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + autoscaling_policies.AutoscalingPolicy() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.get_autoscaling_policy(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_autoscaling_policy_flattened_error_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_autoscaling_policy( + autoscaling_policies.GetAutoscalingPolicyRequest(), name="name_value", + ) + + +def test_list_autoscaling_policies( + transport: str = "grpc", + request_type=autoscaling_policies.ListAutoscalingPoliciesRequest, +): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_autoscaling_policies), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.ListAutoscalingPoliciesResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_autoscaling_policies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == autoscaling_policies.ListAutoscalingPoliciesRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListAutoscalingPoliciesPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_autoscaling_policies_from_dict(): + test_list_autoscaling_policies(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_autoscaling_policies_async(transport: str = "grpc_asyncio"): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = autoscaling_policies.ListAutoscalingPoliciesRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_autoscaling_policies), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + autoscaling_policies.ListAutoscalingPoliciesResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_autoscaling_policies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListAutoscalingPoliciesAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_autoscaling_policies_field_headers(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.ListAutoscalingPoliciesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_autoscaling_policies), "__call__" + ) as call: + call.return_value = autoscaling_policies.ListAutoscalingPoliciesResponse() + + client.list_autoscaling_policies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_autoscaling_policies_field_headers_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.ListAutoscalingPoliciesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_autoscaling_policies), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + autoscaling_policies.ListAutoscalingPoliciesResponse() + ) + + await client.list_autoscaling_policies(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_autoscaling_policies_flattened(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_autoscaling_policies), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = autoscaling_policies.ListAutoscalingPoliciesResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_autoscaling_policies(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_autoscaling_policies_flattened_error(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.list_autoscaling_policies(
+            autoscaling_policies.ListAutoscalingPoliciesRequest(),
+            parent="parent_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_autoscaling_policies_flattened_async():
+    client = AutoscalingPolicyServiceAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.list_autoscaling_policies), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = autoscaling_policies.ListAutoscalingPoliciesResponse()
+
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            autoscaling_policies.ListAutoscalingPoliciesResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_autoscaling_policies(parent="parent_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0].parent == "parent_value"
+
+
+@pytest.mark.asyncio
+async def test_list_autoscaling_policies_flattened_error_async():
+    client = AutoscalingPolicyServiceAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_autoscaling_policies(
+            autoscaling_policies.ListAutoscalingPoliciesRequest(),
+            parent="parent_value",
+        )
+
+
+def test_list_autoscaling_policies_pager():
+    client = AutoscalingPolicyServiceClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._transport.list_autoscaling_policies), "__call__"
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            autoscaling_policies.ListAutoscalingPoliciesResponse(
+                policies=[
+                    autoscaling_policies.AutoscalingPolicy(),
+                    autoscaling_policies.AutoscalingPolicy(),
+                    autoscaling_policies.AutoscalingPolicy(),
+                ],
+                next_page_token="abc",
+            ),
+            autoscaling_policies.ListAutoscalingPoliciesResponse(
+                policies=[], next_page_token="def",
+            ),
+            autoscaling_policies.ListAutoscalingPoliciesResponse(
+                policies=[autoscaling_policies.AutoscalingPolicy(),],
+                next_page_token="ghi",
+            ),
+            autoscaling_policies.ListAutoscalingPoliciesResponse(
+                policies=[
+                    autoscaling_policies.AutoscalingPolicy(),
+                    autoscaling_policies.AutoscalingPolicy(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+        )
+        pager = client.list_autoscaling_policies(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(
+            isinstance(i, autoscaling_policies.AutoscalingPolicy) for i in results
+        )
+
+
+def test_list_autoscaling_policies_pages():
+    client = AutoscalingPolicyServiceClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._transport.list_autoscaling_policies), "__call__"
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            autoscaling_policies.ListAutoscalingPoliciesResponse(
+                policies=[
+                    autoscaling_policies.AutoscalingPolicy(),
+                    autoscaling_policies.AutoscalingPolicy(),
+                    autoscaling_policies.AutoscalingPolicy(),
+                ],
+                next_page_token="abc",
+            ),
+            autoscaling_policies.ListAutoscalingPoliciesResponse(
+                policies=[], next_page_token="def",
+            ),
+            autoscaling_policies.ListAutoscalingPoliciesResponse(
+                policies=[autoscaling_policies.AutoscalingPolicy(),],
+                next_page_token="ghi",
+            ),
+            autoscaling_policies.ListAutoscalingPoliciesResponse(
+                policies=[
+                    autoscaling_policies.AutoscalingPolicy(),
+                    autoscaling_policies.AutoscalingPolicy(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_autoscaling_policies(request={}).pages)
+        for page, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_autoscaling_policies_async_pager():
+    client = AutoscalingPolicyServiceAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.list_autoscaling_policies),
+        "__call__",
+        new_callable=mock.AsyncMock,
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            autoscaling_policies.ListAutoscalingPoliciesResponse(
+                policies=[
+                    autoscaling_policies.AutoscalingPolicy(),
+                    autoscaling_policies.AutoscalingPolicy(),
+                    autoscaling_policies.AutoscalingPolicy(),
+                ],
+                next_page_token="abc",
+            ),
+            autoscaling_policies.ListAutoscalingPoliciesResponse(
+                policies=[], next_page_token="def",
+            ),
+            autoscaling_policies.ListAutoscalingPoliciesResponse(
+                policies=[autoscaling_policies.AutoscalingPolicy(),],
+                next_page_token="ghi",
+            ),
+            autoscaling_policies.ListAutoscalingPoliciesResponse(
+                policies=[
+                    autoscaling_policies.AutoscalingPolicy(),
+                    autoscaling_policies.AutoscalingPolicy(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_autoscaling_policies(request={},)
+        assert async_pager.next_page_token == "abc"
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(
+            isinstance(i, autoscaling_policies.AutoscalingPolicy) for i in responses
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_autoscaling_policies_async_pages():
+    client = AutoscalingPolicyServiceAsyncClient(
+        credentials=credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client._client._transport.list_autoscaling_policies),
+        "__call__",
+        new_callable=mock.AsyncMock,
+    ) as call:
+        # Set the response to a series of pages.
+ call.side_effect = ( + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + ], + next_page_token="abc", + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[], next_page_token="def", + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[autoscaling_policies.AutoscalingPolicy(),], + next_page_token="ghi", + ), + autoscaling_policies.ListAutoscalingPoliciesResponse( + policies=[ + autoscaling_policies.AutoscalingPolicy(), + autoscaling_policies.AutoscalingPolicy(), + ], + ), + RuntimeError, + ) + pages = [] + async for page in (await client.list_autoscaling_policies(request={})).pages: + pages.append(page) + for page, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page.raw_page.next_page_token == token + + +def test_delete_autoscaling_policy( + transport: str = "grpc", + request_type=autoscaling_policies.DeleteAutoscalingPolicyRequest, +): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == autoscaling_policies.DeleteAutoscalingPolicyRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_autoscaling_policy_from_dict(): + test_delete_autoscaling_policy(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_autoscaling_policy_async(transport: str = "grpc_asyncio"): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = autoscaling_policies.DeleteAutoscalingPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_autoscaling_policy_field_headers(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = autoscaling_policies.DeleteAutoscalingPolicyRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_autoscaling_policy), "__call__" + ) as call: + call.return_value = None + + client.delete_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_autoscaling_policy_field_headers_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = autoscaling_policies.DeleteAutoscalingPolicyRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_autoscaling_policy), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.delete_autoscaling_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_autoscaling_policy_flattened(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_autoscaling_policy(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_autoscaling_policy_flattened_error(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_autoscaling_policy( + autoscaling_policies.DeleteAutoscalingPolicyRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_autoscaling_policy_flattened_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_autoscaling_policy), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.delete_autoscaling_policy(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_autoscaling_policy_flattened_error_async(): + client = AutoscalingPolicyServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_autoscaling_policy( + autoscaling_policies.DeleteAutoscalingPolicyRequest(), name="name_value", + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.AutoscalingPolicyServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.AutoscalingPolicyServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AutoscalingPolicyServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.AutoscalingPolicyServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AutoscalingPolicyServiceClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.AutoscalingPolicyServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = AutoscalingPolicyServiceClient(transport=transport) + assert client._transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.AutoscalingPolicyServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.AutoscalingPolicyServiceGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + assert isinstance( + client._transport, transports.AutoscalingPolicyServiceGrpcTransport, + ) + + +def test_autoscaling_policy_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.AutoscalingPolicyServiceTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_autoscaling_policy_service_base_transport(): + # Instantiate the base transport. 
+ with mock.patch( + "google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.AutoscalingPolicyServiceTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "create_autoscaling_policy", + "update_autoscaling_policy", + "get_autoscaling_policy", + "list_autoscaling_policies", + "delete_autoscaling_policy", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + +def test_autoscaling_policy_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.dataproc_v1beta2.services.autoscaling_policy_service.transports.AutoscalingPolicyServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.AutoscalingPolicyServiceTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_autoscaling_policy_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + AutoscalingPolicyServiceClient() + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +def test_autoscaling_policy_service_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.AutoscalingPolicyServiceGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_autoscaling_policy_service_host_no_port(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="dataproc.googleapis.com" + ), + ) + assert client._transport._host == "dataproc.googleapis.com:443" + + +def test_autoscaling_policy_service_host_with_port(): + client = AutoscalingPolicyServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="dataproc.googleapis.com:8000" + ), + ) + assert client._transport._host == "dataproc.googleapis.com:8000" + + +def test_autoscaling_policy_service_grpc_transport_channel(): + channel = grpc.insecure_channel("http://localhost/") + + # Check that if channel is provided, mtls endpoint and client_cert_source + # won't be used. 
+ callback = mock.MagicMock() + transport = transports.AutoscalingPolicyServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=callback, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert not callback.called + + +def test_autoscaling_policy_service_grpc_asyncio_transport_channel(): + channel = aio.insecure_channel("http://localhost/") + + # Check that if channel is provided, mtls endpoint and client_cert_source + # won't be used. + callback = mock.MagicMock() + transport = transports.AutoscalingPolicyServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=callback, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert not callback.called + + +@mock.patch("grpc.ssl_channel_credentials", autospec=True) +@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True) +def test_autoscaling_policy_service_grpc_transport_channel_mtls_with_client_cert_source( + grpc_create_channel, grpc_ssl_channel_cred +): + # Check that if channel is None, but api_mtls_endpoint and client_cert_source + # are provided, then a mTLS channel will be created. + mock_cred = mock.Mock() + + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + transport = transports.AutoscalingPolicyServiceGrpcTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@mock.patch("grpc.ssl_channel_credentials", autospec=True) +@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True) +def test_autoscaling_policy_service_grpc_asyncio_transport_channel_mtls_with_client_cert_source( + grpc_create_channel, grpc_ssl_channel_cred +): + # Check that if channel is None, but api_mtls_endpoint and client_cert_source + # are provided, then a mTLS channel will be created. 
+ mock_cred = mock.Mock() + + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + transport = transports.AutoscalingPolicyServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"] +) +@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True) +def test_autoscaling_policy_service_grpc_transport_channel_mtls_with_adc( + grpc_create_channel, api_mtls_endpoint +): + # Check that if channel and client_cert_source are None, but api_mtls_endpoint + # is provided, then a mTLS channel will be created with SSL ADC. + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + # Mock google.auth.transport.grpc.SslCredentials class. + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + mock_cred = mock.Mock() + transport = transports.AutoscalingPolicyServiceGrpcTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint=api_mtls_endpoint, + client_cert_source=None, + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"] +) +@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True) +def test_autoscaling_policy_service_grpc_asyncio_transport_channel_mtls_with_adc( + grpc_create_channel, api_mtls_endpoint +): + # Check that if channel and client_cert_source are None, but api_mtls_endpoint + # is provided, then a mTLS channel will be created with SSL ADC. + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + # Mock google.auth.transport.grpc.SslCredentials class. 
+ mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + mock_cred = mock.Mock() + transport = transports.AutoscalingPolicyServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint=api_mtls_endpoint, + client_cert_source=None, + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_autoscaling_policy_path(): + project = "squid" + location = "clam" + autoscaling_policy = "whelk" + + expected = "projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}".format( + project=project, location=location, autoscaling_policy=autoscaling_policy, + ) + actual = AutoscalingPolicyServiceClient.autoscaling_policy_path( + project, location, autoscaling_policy + ) + assert expected == actual + + +def test_parse_autoscaling_policy_path(): + expected = { + "project": "octopus", + "location": "oyster", + "autoscaling_policy": "nudibranch", + } + path = AutoscalingPolicyServiceClient.autoscaling_policy_path(**expected) + + # Check that the path construction is reversible. + actual = AutoscalingPolicyServiceClient.parse_autoscaling_policy_path(path) + assert expected == actual diff --git a/tests/unit/gapic/dataproc_v1beta2/test_cluster_controller.py b/tests/unit/gapic/dataproc_v1beta2/test_cluster_controller.py new file mode 100644 index 00000000..1dbc1a36 --- /dev/null +++ b/tests/unit/gapic/dataproc_v1beta2/test_cluster_controller.py @@ -0,0 +1,1843 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async +from google.api_core import operations_v1 +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.dataproc_v1beta2.services.cluster_controller import ( + ClusterControllerAsyncClient, +) +from google.cloud.dataproc_v1beta2.services.cluster_controller import ( + ClusterControllerClient, +) +from google.cloud.dataproc_v1beta2.services.cluster_controller import pagers +from google.cloud.dataproc_v1beta2.services.cluster_controller import transports +from google.cloud.dataproc_v1beta2.types import clusters +from google.cloud.dataproc_v1beta2.types import clusters as gcd_clusters +from google.cloud.dataproc_v1beta2.types import operations +from google.cloud.dataproc_v1beta2.types import shared +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import duration_pb2 as duration # type: ignore +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ClusterControllerClient._get_default_mtls_endpoint(None) is None + assert ( + ClusterControllerClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + ClusterControllerClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + ClusterControllerClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + ClusterControllerClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + ClusterControllerClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", [ClusterControllerClient, ClusterControllerAsyncClient] +) +def test_cluster_controller_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client._transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client._transport._credentials == creds + + assert client._transport._host == "dataproc.googleapis.com:443" + + +def test_cluster_controller_client_get_transport_class(): + transport = ClusterControllerClient.get_transport_class() + assert transport == transports.ClusterControllerGrpcTransport + + transport = ClusterControllerClient.get_transport_class("grpc") + assert transport == transports.ClusterControllerGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (ClusterControllerClient, transports.ClusterControllerGrpcTransport, "grpc"), + ( + ClusterControllerAsyncClient, + transports.ClusterControllerGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + ClusterControllerClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ClusterControllerClient), +) +@mock.patch.object( + ClusterControllerAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ClusterControllerAsyncClient), +) +def test_cluster_controller_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(ClusterControllerClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(ClusterControllerClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + api_mtls_endpoint="squid.clam.whelk", + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", and client_cert_source is provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=client_cert_source_callback, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", and default_client_cert_source is provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", but client_cert_source and default_client_cert_source are None. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (ClusterControllerClient, transports.ClusterControllerGrpcTransport, "grpc"), + ( + ClusterControllerAsyncClient, + transports.ClusterControllerGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_cluster_controller_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (ClusterControllerClient, transports.ClusterControllerGrpcTransport, "grpc"), + ( + ClusterControllerAsyncClient, + transports.ClusterControllerGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_cluster_controller_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + +def test_cluster_controller_client_client_options_from_dict(): + with mock.patch( + "google.cloud.dataproc_v1beta2.services.cluster_controller.transports.ClusterControllerGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = ClusterControllerClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + api_mtls_endpoint="squid.clam.whelk", + client_cert_source=None, + quota_project_id=None, + ) + + +def test_create_cluster( + transport: str = "grpc", request_type=clusters.CreateClusterRequest +): + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.create_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == clusters.CreateClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_cluster_from_dict(): + test_create_cluster(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_cluster_async(transport: str = "grpc_asyncio"): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = clusters.CreateClusterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_cluster_flattened(): + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.create_cluster), "__call__") as call: + # Designate an appropriate return value for the call. 
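+        # create_cluster is a long-running operation, so the stub hands back a
+        # raw Operation proto; this test only checks how the flattened keyword
+        # arguments are mapped onto the request.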
+ call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_cluster( + project_id="project_id_value", + region="region_value", + cluster=clusters.Cluster(project_id="project_id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].cluster == clusters.Cluster(project_id="project_id_value") + + +def test_create_cluster_flattened_error(): + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_cluster( + clusters.CreateClusterRequest(), + project_id="project_id_value", + region="region_value", + cluster=clusters.Cluster(project_id="project_id_value"), + ) + + +@pytest.mark.asyncio +async def test_create_cluster_flattened_async(): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_cluster( + project_id="project_id_value", + region="region_value", + cluster=clusters.Cluster(project_id="project_id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].cluster == clusters.Cluster(project_id="project_id_value") + + +@pytest.mark.asyncio +async def test_create_cluster_flattened_error_async(): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_cluster( + clusters.CreateClusterRequest(), + project_id="project_id_value", + region="region_value", + cluster=clusters.Cluster(project_id="project_id_value"), + ) + + +def test_update_cluster( + transport: str = "grpc", request_type=clusters.UpdateClusterRequest +): + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.update_cluster), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == clusters.UpdateClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_cluster_from_dict(): + test_update_cluster(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_cluster_async(transport: str = "grpc_asyncio"): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = clusters.UpdateClusterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_cluster_flattened(): + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.update_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_cluster( + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + cluster=clusters.Cluster(project_id="project_id_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].cluster_name == "cluster_name_value" + + assert args[0].cluster == clusters.Cluster(project_id="project_id_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +def test_update_cluster_flattened_error(): + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
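+    # The request object and the flattened keyword arguments are mutually
+    # exclusive on the generated surface, so combining them must raise.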
+ with pytest.raises(ValueError): + client.update_cluster( + clusters.UpdateClusterRequest(), + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + cluster=clusters.Cluster(project_id="project_id_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_cluster_flattened_async(): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_cluster( + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + cluster=clusters.Cluster(project_id="project_id_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].cluster_name == "cluster_name_value" + + assert args[0].cluster == clusters.Cluster(project_id="project_id_value") + + assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"]) + + +@pytest.mark.asyncio +async def test_update_cluster_flattened_error_async(): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_cluster( + clusters.UpdateClusterRequest(), + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + cluster=clusters.Cluster(project_id="project_id_value"), + update_mask=field_mask.FieldMask(paths=["paths_value"]), + ) + + +def test_delete_cluster( + transport: str = "grpc", request_type=clusters.DeleteClusterRequest +): + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.delete_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == clusters.DeleteClusterRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_delete_cluster_from_dict(): + test_delete_cluster(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_cluster_async(transport: str = "grpc_asyncio"): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = clusters.DeleteClusterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_cluster_flattened(): + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.delete_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_cluster( + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].cluster_name == "cluster_name_value" + + +def test_delete_cluster_flattened_error(): + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_cluster( + clusters.DeleteClusterRequest(), + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_cluster_flattened_async(): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_cluster( + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].cluster_name == "cluster_name_value" + + +@pytest.mark.asyncio +async def test_delete_cluster_flattened_error_async(): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_cluster( + clusters.DeleteClusterRequest(), + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + ) + + +def test_get_cluster(transport: str = "grpc", request_type=clusters.GetClusterRequest): + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = clusters.Cluster( + project_id="project_id_value", + cluster_name="cluster_name_value", + cluster_uuid="cluster_uuid_value", + ) + + response = client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == clusters.GetClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, clusters.Cluster) + + assert response.project_id == "project_id_value" + + assert response.cluster_name == "cluster_name_value" + + assert response.cluster_uuid == "cluster_uuid_value" + + +def test_get_cluster_from_dict(): + test_get_cluster(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_cluster_async(transport: str = "grpc_asyncio"): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = clusters.GetClusterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + clusters.Cluster( + project_id="project_id_value", + cluster_name="cluster_name_value", + cluster_uuid="cluster_uuid_value", + ) + ) + + response = await client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, clusters.Cluster) + + assert response.project_id == "project_id_value" + + assert response.cluster_name == "cluster_name_value" + + assert response.cluster_uuid == "cluster_uuid_value" + + +def test_get_cluster_flattened(): + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. 
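+    # Unlike the cluster mutation RPCs above, get_cluster is a plain unary call,
+    # so the stub returns a Cluster message directly instead of an Operation.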
+ with mock.patch.object(type(client._transport.get_cluster), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = clusters.Cluster() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_cluster( + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].cluster_name == "cluster_name_value" + + +def test_get_cluster_flattened_error(): + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_cluster( + clusters.GetClusterRequest(), + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + ) + + +@pytest.mark.asyncio +async def test_get_cluster_flattened_async(): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = clusters.Cluster() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(clusters.Cluster()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_cluster( + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].cluster_name == "cluster_name_value" + + +@pytest.mark.asyncio +async def test_get_cluster_flattened_error_async(): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_cluster( + clusters.GetClusterRequest(), + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + ) + + +def test_list_clusters( + transport: str = "grpc", request_type=clusters.ListClustersRequest +): + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_clusters), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = clusters.ListClustersResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == clusters.ListClustersRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListClustersPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_clusters_from_dict(): + test_list_clusters(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_clusters_async(transport: str = "grpc_asyncio"): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = clusters.ListClustersRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_clusters), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + clusters.ListClustersResponse(next_page_token="next_page_token_value",) + ) + + response = await client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListClustersAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_clusters_flattened(): + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_clusters), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = clusters.ListClustersResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_clusters( + project_id="project_id_value", region="region_value", filter="filter_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].filter == "filter_value" + + +def test_list_clusters_flattened_error(): + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_clusters( + clusters.ListClustersRequest(), + project_id="project_id_value", + region="region_value", + filter="filter_value", + ) + + +@pytest.mark.asyncio +async def test_list_clusters_flattened_async(): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_clusters), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = clusters.ListClustersResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + clusters.ListClustersResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.list_clusters( + project_id="project_id_value", region="region_value", filter="filter_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].filter == "filter_value" + + +@pytest.mark.asyncio +async def test_list_clusters_flattened_error_async(): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_clusters( + clusters.ListClustersRequest(), + project_id="project_id_value", + region="region_value", + filter="filter_value", + ) + + +def test_list_clusters_pager(): + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_clusters), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + clusters.ListClustersResponse( + clusters=[clusters.Cluster(), clusters.Cluster(), clusters.Cluster(),], + next_page_token="abc", + ), + clusters.ListClustersResponse(clusters=[], next_page_token="def",), + clusters.ListClustersResponse( + clusters=[clusters.Cluster(),], next_page_token="ghi", + ), + clusters.ListClustersResponse( + clusters=[clusters.Cluster(), clusters.Cluster(),], + ), + RuntimeError, + ) + + metadata = () + pager = client.list_clusters(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, clusters.Cluster) for i in results) + + +def test_list_clusters_pages(): + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_clusters), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + clusters.ListClustersResponse( + clusters=[clusters.Cluster(), clusters.Cluster(), clusters.Cluster(),], + next_page_token="abc", + ), + clusters.ListClustersResponse(clusters=[], next_page_token="def",), + clusters.ListClustersResponse( + clusters=[clusters.Cluster(),], next_page_token="ghi", + ), + clusters.ListClustersResponse( + clusters=[clusters.Cluster(), clusters.Cluster(),], + ), + RuntimeError, + ) + pages = list(client.list_clusters(request={}).pages) + for page, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_clusters_async_pager(): + client = ClusterControllerAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_clusters), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
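+        # Each call returns the next page; the trailing RuntimeError only fires
+        # if the pager were to request a page past the final (token-less)
+        # response.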
+ call.side_effect = ( + clusters.ListClustersResponse( + clusters=[clusters.Cluster(), clusters.Cluster(), clusters.Cluster(),], + next_page_token="abc", + ), + clusters.ListClustersResponse(clusters=[], next_page_token="def",), + clusters.ListClustersResponse( + clusters=[clusters.Cluster(),], next_page_token="ghi", + ), + clusters.ListClustersResponse( + clusters=[clusters.Cluster(), clusters.Cluster(),], + ), + RuntimeError, + ) + async_pager = await client.list_clusters(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, clusters.Cluster) for i in responses) + + +@pytest.mark.asyncio +async def test_list_clusters_async_pages(): + client = ClusterControllerAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_clusters), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + clusters.ListClustersResponse( + clusters=[clusters.Cluster(), clusters.Cluster(), clusters.Cluster(),], + next_page_token="abc", + ), + clusters.ListClustersResponse(clusters=[], next_page_token="def",), + clusters.ListClustersResponse( + clusters=[clusters.Cluster(),], next_page_token="ghi", + ), + clusters.ListClustersResponse( + clusters=[clusters.Cluster(), clusters.Cluster(),], + ), + RuntimeError, + ) + pages = [] + async for page in (await client.list_clusters(request={})).pages: + pages.append(page) + for page, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page.raw_page.next_page_token == token + + +def test_diagnose_cluster( + transport: str = "grpc", request_type=clusters.DiagnoseClusterRequest +): + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.diagnose_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.diagnose_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == clusters.DiagnoseClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_diagnose_cluster_from_dict(): + test_diagnose_cluster(request_type=dict) + + +@pytest.mark.asyncio +async def test_diagnose_cluster_async(transport: str = "grpc_asyncio"): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = clusters.DiagnoseClusterRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.diagnose_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
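+        # FakeUnaryUnaryCall makes the mocked return value awaitable, mimicking
+        # a real grpc.aio unary-unary call.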
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.diagnose_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_diagnose_cluster_flattened(): + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.diagnose_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.diagnose_cluster( + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].cluster_name == "cluster_name_value" + + +def test_diagnose_cluster_flattened_error(): + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.diagnose_cluster( + clusters.DiagnoseClusterRequest(), + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + ) + + +@pytest.mark.asyncio +async def test_diagnose_cluster_flattened_async(): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.diagnose_cluster), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.diagnose_cluster( + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].cluster_name == "cluster_name_value" + + +@pytest.mark.asyncio +async def test_diagnose_cluster_flattened_error_async(): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.diagnose_cluster( + clusters.DiagnoseClusterRequest(), + project_id="project_id_value", + region="region_value", + cluster_name="cluster_name_value", + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.ClusterControllerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.ClusterControllerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ClusterControllerClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.ClusterControllerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ClusterControllerClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.ClusterControllerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = ClusterControllerClient(transport=transport) + assert client._transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.ClusterControllerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.ClusterControllerGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = ClusterControllerClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client._transport, transports.ClusterControllerGrpcTransport,) + + +def test_cluster_controller_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.ClusterControllerTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_cluster_controller_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.dataproc_v1beta2.services.cluster_controller.transports.ClusterControllerTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.ClusterControllerTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
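+    # The base transport only defines the interface; the concrete gRPC and
+    # gRPC-asyncio transports supply the real implementations.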
+ methods = ( + "create_cluster", + "update_cluster", + "delete_cluster", + "get_cluster", + "list_clusters", + "diagnose_cluster", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +def test_cluster_controller_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.dataproc_v1beta2.services.cluster_controller.transports.ClusterControllerTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.ClusterControllerTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_cluster_controller_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + ClusterControllerClient() + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +def test_cluster_controller_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.ClusterControllerGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_cluster_controller_host_no_port(): + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="dataproc.googleapis.com" + ), + ) + assert client._transport._host == "dataproc.googleapis.com:443" + + +def test_cluster_controller_host_with_port(): + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="dataproc.googleapis.com:8000" + ), + ) + assert client._transport._host == "dataproc.googleapis.com:8000" + + +def test_cluster_controller_grpc_transport_channel(): + channel = grpc.insecure_channel("http://localhost/") + + # Check that if channel is provided, mtls endpoint and client_cert_source + # won't be used. + callback = mock.MagicMock() + transport = transports.ClusterControllerGrpcTransport( + host="squid.clam.whelk", + channel=channel, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=callback, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert not callback.called + + +def test_cluster_controller_grpc_asyncio_transport_channel(): + channel = aio.insecure_channel("http://localhost/") + + # Check that if channel is provided, mtls endpoint and client_cert_source + # won't be used. 
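+    # A pre-built channel takes precedence, so the certificate callback below
+    # must never be invoked.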
+ callback = mock.MagicMock() + transport = transports.ClusterControllerGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=callback, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert not callback.called + + +@mock.patch("grpc.ssl_channel_credentials", autospec=True) +@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True) +def test_cluster_controller_grpc_transport_channel_mtls_with_client_cert_source( + grpc_create_channel, grpc_ssl_channel_cred +): + # Check that if channel is None, but api_mtls_endpoint and client_cert_source + # are provided, then a mTLS channel will be created. + mock_cred = mock.Mock() + + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + transport = transports.ClusterControllerGrpcTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@mock.patch("grpc.ssl_channel_credentials", autospec=True) +@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True) +def test_cluster_controller_grpc_asyncio_transport_channel_mtls_with_client_cert_source( + grpc_create_channel, grpc_ssl_channel_cred +): + # Check that if channel is None, but api_mtls_endpoint and client_cert_source + # are provided, then a mTLS channel will be created. + mock_cred = mock.Mock() + + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + transport = transports.ClusterControllerGrpcAsyncIOTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"] +) +@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True) +def test_cluster_controller_grpc_transport_channel_mtls_with_adc( + grpc_create_channel, api_mtls_endpoint +): + # Check that if channel and client_cert_source are None, but api_mtls_endpoint + # is provided, then a mTLS channel will be created with SSL ADC. + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + # Mock google.auth.transport.grpc.SslCredentials class. 
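+    # Patching SslCredentials lets the test verify that the ADC-derived SSL
+    # credentials are forwarded to create_channel without touching real
+    # certificates.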
+ mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + mock_cred = mock.Mock() + transport = transports.ClusterControllerGrpcTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint=api_mtls_endpoint, + client_cert_source=None, + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"] +) +@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True) +def test_cluster_controller_grpc_asyncio_transport_channel_mtls_with_adc( + grpc_create_channel, api_mtls_endpoint +): + # Check that if channel and client_cert_source are None, but api_mtls_endpoint + # is provided, then a mTLS channel will be created with SSL ADC. + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + # Mock google.auth.transport.grpc.SslCredentials class. + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + mock_cred = mock.Mock() + transport = transports.ClusterControllerGrpcAsyncIOTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint=api_mtls_endpoint, + client_cert_source=None, + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_cluster_controller_grpc_lro_client(): + client = ClusterControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + transport = client._transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_cluster_controller_grpc_lro_async_client(): + client = ClusterControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + transport = client._client._transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client diff --git a/tests/unit/gapic/dataproc_v1beta2/test_job_controller.py b/tests/unit/gapic/dataproc_v1beta2/test_job_controller.py new file mode 100644 index 00000000..89e093e6 --- /dev/null +++ b/tests/unit/gapic/dataproc_v1beta2/test_job_controller.py @@ -0,0 +1,1927 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async +from google.api_core import operations_v1 +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.dataproc_v1beta2.services.job_controller import ( + JobControllerAsyncClient, +) +from google.cloud.dataproc_v1beta2.services.job_controller import JobControllerClient +from google.cloud.dataproc_v1beta2.services.job_controller import pagers +from google.cloud.dataproc_v1beta2.services.job_controller import transports +from google.cloud.dataproc_v1beta2.types import jobs +from google.cloud.dataproc_v1beta2.types import jobs as gcd_jobs +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import field_mask_pb2 as field_mask # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
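+# (The substitution only kicks in when DEFAULT_ENDPOINT points at localhost;
+# otherwise the endpoint is returned unchanged.)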
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert JobControllerClient._get_default_mtls_endpoint(None) is None + assert ( + JobControllerClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + JobControllerClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + JobControllerClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + JobControllerClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + JobControllerClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", [JobControllerClient, JobControllerAsyncClient] +) +def test_job_controller_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client._transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client._transport._credentials == creds + + assert client._transport._host == "dataproc.googleapis.com:443" + + +def test_job_controller_client_get_transport_class(): + transport = JobControllerClient.get_transport_class() + assert transport == transports.JobControllerGrpcTransport + + transport = JobControllerClient.get_transport_class("grpc") + assert transport == transports.JobControllerGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (JobControllerClient, transports.JobControllerGrpcTransport, "grpc"), + ( + JobControllerAsyncClient, + transports.JobControllerGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + JobControllerClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(JobControllerClient), +) +@mock.patch.object( + JobControllerAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(JobControllerAsyncClient), +) +def test_job_controller_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(JobControllerClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(JobControllerClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
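+    # An explicit api_endpoint is used verbatim for both the host and the
+    # api_mtls_endpoint, bypassing the DEFAULT_ENDPOINT / DEFAULT_MTLS_ENDPOINT
+    # selection.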
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + api_mtls_endpoint="squid.clam.whelk", + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", and client_cert_source is provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=client_cert_source_callback, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", and default_client_cert_source is provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", but client_cert_source and default_client_cert_source are None. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (JobControllerClient, transports.JobControllerGrpcTransport, "grpc"), + ( + JobControllerAsyncClient, + transports.JobControllerGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_job_controller_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (JobControllerClient, transports.JobControllerGrpcTransport, "grpc"), + ( + JobControllerAsyncClient, + transports.JobControllerGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_job_controller_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. 
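+    # The credentials file is never opened in this test; the path is only
+    # expected to be forwarded unchanged to the transport constructor.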
+ options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + +def test_job_controller_client_client_options_from_dict(): + with mock.patch( + "google.cloud.dataproc_v1beta2.services.job_controller.transports.JobControllerGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = JobControllerClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + api_mtls_endpoint="squid.clam.whelk", + client_cert_source=None, + quota_project_id=None, + ) + + +def test_submit_job(transport: str = "grpc", request_type=jobs.SubmitJobRequest): + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.submit_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job( + submitted_by="submitted_by_value", + driver_output_resource_uri="driver_output_resource_uri_value", + driver_control_files_uri="driver_control_files_uri_value", + job_uuid="job_uuid_value", + done=True, + hadoop_job=jobs.HadoopJob(main_jar_file_uri="main_jar_file_uri_value"), + ) + + response = client.submit_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == jobs.SubmitJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, jobs.Job) + + assert response.submitted_by == "submitted_by_value" + + assert response.driver_output_resource_uri == "driver_output_resource_uri_value" + + assert response.driver_control_files_uri == "driver_control_files_uri_value" + + assert response.job_uuid == "job_uuid_value" + + assert response.done is True + + +def test_submit_job_from_dict(): + test_submit_job(request_type=dict) + + +@pytest.mark.asyncio +async def test_submit_job_async(transport: str = "grpc_asyncio"): + client = JobControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = jobs.SubmitJobRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.submit_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
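+        # The async client awaits the stub, so the fake response must be
+        # awaitable; FakeUnaryUnaryCall wraps the Job in an awaitable call.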
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + jobs.Job( + submitted_by="submitted_by_value", + driver_output_resource_uri="driver_output_resource_uri_value", + driver_control_files_uri="driver_control_files_uri_value", + job_uuid="job_uuid_value", + done=True, + ) + ) + + response = await client.submit_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, jobs.Job) + + assert response.submitted_by == "submitted_by_value" + + assert response.driver_output_resource_uri == "driver_output_resource_uri_value" + + assert response.driver_control_files_uri == "driver_control_files_uri_value" + + assert response.job_uuid == "job_uuid_value" + + assert response.done is True + + +def test_submit_job_flattened(): + client = JobControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.submit_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.submit_job( + project_id="project_id_value", + region="region_value", + job=jobs.Job(reference=jobs.JobReference(project_id="project_id_value")), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].job == jobs.Job( + reference=jobs.JobReference(project_id="project_id_value") + ) + + +def test_submit_job_flattened_error(): + client = JobControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.submit_job( + jobs.SubmitJobRequest(), + project_id="project_id_value", + region="region_value", + job=jobs.Job(reference=jobs.JobReference(project_id="project_id_value")), + ) + + +@pytest.mark.asyncio +async def test_submit_job_flattened_async(): + client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.submit_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.submit_job( + project_id="project_id_value", + region="region_value", + job=jobs.Job(reference=jobs.JobReference(project_id="project_id_value")), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
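+        # The flattened keyword arguments should have been packed into a
+        # single SubmitJobRequest before reaching the transport.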
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].job == jobs.Job( + reference=jobs.JobReference(project_id="project_id_value") + ) + + +@pytest.mark.asyncio +async def test_submit_job_flattened_error_async(): + client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.submit_job( + jobs.SubmitJobRequest(), + project_id="project_id_value", + region="region_value", + job=jobs.Job(reference=jobs.JobReference(project_id="project_id_value")), + ) + + +def test_submit_job_as_operation( + transport: str = "grpc", request_type=jobs.SubmitJobRequest +): + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.submit_job_as_operation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.submit_job_as_operation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == jobs.SubmitJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_submit_job_as_operation_from_dict(): + test_submit_job_as_operation(request_type=dict) + + +@pytest.mark.asyncio +async def test_submit_job_as_operation_async(transport: str = "grpc_asyncio"): + client = JobControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = jobs.SubmitJobRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.submit_job_as_operation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.submit_job_as_operation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_submit_job_as_operation_flattened(): + client = JobControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.submit_job_as_operation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.submit_job_as_operation( + project_id="project_id_value", + region="region_value", + job=jobs.Job(reference=jobs.JobReference(project_id="project_id_value")), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].job == jobs.Job( + reference=jobs.JobReference(project_id="project_id_value") + ) + + +def test_submit_job_as_operation_flattened_error(): + client = JobControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.submit_job_as_operation( + jobs.SubmitJobRequest(), + project_id="project_id_value", + region="region_value", + job=jobs.Job(reference=jobs.JobReference(project_id="project_id_value")), + ) + + +@pytest.mark.asyncio +async def test_submit_job_as_operation_flattened_async(): + client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.submit_job_as_operation), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.submit_job_as_operation( + project_id="project_id_value", + region="region_value", + job=jobs.Job(reference=jobs.JobReference(project_id="project_id_value")), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].job == jobs.Job( + reference=jobs.JobReference(project_id="project_id_value") + ) + + +@pytest.mark.asyncio +async def test_submit_job_as_operation_flattened_error_async(): + client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.submit_job_as_operation( + jobs.SubmitJobRequest(), + project_id="project_id_value", + region="region_value", + job=jobs.Job(reference=jobs.JobReference(project_id="project_id_value")), + ) + + +def test_get_job(transport: str = "grpc", request_type=jobs.GetJobRequest): + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_job), "__call__") as call: + # Designate an appropriate return value for the call. 
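+        # Populate a few scalar fields so the test can check that they are
+        # passed through to the returned Job unchanged.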
+ call.return_value = jobs.Job( + submitted_by="submitted_by_value", + driver_output_resource_uri="driver_output_resource_uri_value", + driver_control_files_uri="driver_control_files_uri_value", + job_uuid="job_uuid_value", + done=True, + hadoop_job=jobs.HadoopJob(main_jar_file_uri="main_jar_file_uri_value"), + ) + + response = client.get_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == jobs.GetJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, jobs.Job) + + assert response.submitted_by == "submitted_by_value" + + assert response.driver_output_resource_uri == "driver_output_resource_uri_value" + + assert response.driver_control_files_uri == "driver_control_files_uri_value" + + assert response.job_uuid == "job_uuid_value" + + assert response.done is True + + +def test_get_job_from_dict(): + test_get_job(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_job_async(transport: str = "grpc_asyncio"): + client = JobControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = jobs.GetJobRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._client._transport.get_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + jobs.Job( + submitted_by="submitted_by_value", + driver_output_resource_uri="driver_output_resource_uri_value", + driver_control_files_uri="driver_control_files_uri_value", + job_uuid="job_uuid_value", + done=True, + ) + ) + + response = await client.get_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, jobs.Job) + + assert response.submitted_by == "submitted_by_value" + + assert response.driver_output_resource_uri == "driver_output_resource_uri_value" + + assert response.driver_control_files_uri == "driver_control_files_uri_value" + + assert response.job_uuid == "job_uuid_value" + + assert response.done is True + + +def test_get_job_flattened(): + client = JobControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.get_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_job( + project_id="project_id_value", region="region_value", job_id="job_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].job_id == "job_id_value" + + +def test_get_job_flattened_error(): + client = JobControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_job( + jobs.GetJobRequest(), + project_id="project_id_value", + region="region_value", + job_id="job_id_value", + ) + + +@pytest.mark.asyncio +async def test_get_job_flattened_async(): + client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._client._transport.get_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_job( + project_id="project_id_value", region="region_value", job_id="job_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].job_id == "job_id_value" + + +@pytest.mark.asyncio +async def test_get_job_flattened_error_async(): + client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_job( + jobs.GetJobRequest(), + project_id="project_id_value", + region="region_value", + job_id="job_id_value", + ) + + +def test_list_jobs(transport: str = "grpc", request_type=jobs.ListJobsRequest): + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_jobs), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.ListJobsResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == jobs.ListJobsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListJobsPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_jobs_from_dict(): + test_list_jobs(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_jobs_async(transport: str = "grpc_asyncio"): + client = JobControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = jobs.ListJobsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_jobs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + jobs.ListJobsResponse(next_page_token="next_page_token_value",) + ) + + response = await client.list_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListJobsAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_jobs_flattened(): + client = JobControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_jobs), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.ListJobsResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_jobs( + project_id="project_id_value", region="region_value", filter="filter_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].filter == "filter_value" + + +def test_list_jobs_flattened_error(): + client = JobControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_jobs( + jobs.ListJobsRequest(), + project_id="project_id_value", + region="region_value", + filter="filter_value", + ) + + +@pytest.mark.asyncio +async def test_list_jobs_flattened_async(): + client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_jobs), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.ListJobsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + jobs.ListJobsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_jobs( + project_id="project_id_value", region="region_value", filter="filter_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].filter == "filter_value" + + +@pytest.mark.asyncio +async def test_list_jobs_flattened_error_async(): + client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
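+    # The check happens client-side, so no transport mock is needed and no
+    # RPC is ever attempted.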
+ with pytest.raises(ValueError): + await client.list_jobs( + jobs.ListJobsRequest(), + project_id="project_id_value", + region="region_value", + filter="filter_value", + ) + + +def test_list_jobs_pager(): + client = JobControllerClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_jobs), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + jobs.ListJobsResponse( + jobs=[jobs.Job(), jobs.Job(), jobs.Job(),], next_page_token="abc", + ), + jobs.ListJobsResponse(jobs=[], next_page_token="def",), + jobs.ListJobsResponse(jobs=[jobs.Job(),], next_page_token="ghi",), + jobs.ListJobsResponse(jobs=[jobs.Job(), jobs.Job(),],), + RuntimeError, + ) + + metadata = () + pager = client.list_jobs(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, jobs.Job) for i in results) + + +def test_list_jobs_pages(): + client = JobControllerClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.list_jobs), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + jobs.ListJobsResponse( + jobs=[jobs.Job(), jobs.Job(), jobs.Job(),], next_page_token="abc", + ), + jobs.ListJobsResponse(jobs=[], next_page_token="def",), + jobs.ListJobsResponse(jobs=[jobs.Job(),], next_page_token="ghi",), + jobs.ListJobsResponse(jobs=[jobs.Job(), jobs.Job(),],), + RuntimeError, + ) + pages = list(client.list_jobs(request={}).pages) + for page, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_jobs_async_pager(): + client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_jobs), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + jobs.ListJobsResponse( + jobs=[jobs.Job(), jobs.Job(), jobs.Job(),], next_page_token="abc", + ), + jobs.ListJobsResponse(jobs=[], next_page_token="def",), + jobs.ListJobsResponse(jobs=[jobs.Job(),], next_page_token="ghi",), + jobs.ListJobsResponse(jobs=[jobs.Job(), jobs.Job(),],), + RuntimeError, + ) + async_pager = await client.list_jobs(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, jobs.Job) for i in responses) + + +@pytest.mark.asyncio +async def test_list_jobs_async_pages(): + client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials,) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_jobs), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
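+        # Four pages, then a RuntimeError sentinel; the last page carries no
+        # page token, so iteration must stop before the sentinel is reached.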
+ call.side_effect = ( + jobs.ListJobsResponse( + jobs=[jobs.Job(), jobs.Job(), jobs.Job(),], next_page_token="abc", + ), + jobs.ListJobsResponse(jobs=[], next_page_token="def",), + jobs.ListJobsResponse(jobs=[jobs.Job(),], next_page_token="ghi",), + jobs.ListJobsResponse(jobs=[jobs.Job(), jobs.Job(),],), + RuntimeError, + ) + pages = [] + async for page in (await client.list_jobs(request={})).pages: + pages.append(page) + for page, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page.raw_page.next_page_token == token + + +def test_update_job(transport: str = "grpc", request_type=jobs.UpdateJobRequest): + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.update_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job( + submitted_by="submitted_by_value", + driver_output_resource_uri="driver_output_resource_uri_value", + driver_control_files_uri="driver_control_files_uri_value", + job_uuid="job_uuid_value", + done=True, + hadoop_job=jobs.HadoopJob(main_jar_file_uri="main_jar_file_uri_value"), + ) + + response = client.update_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == jobs.UpdateJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, jobs.Job) + + assert response.submitted_by == "submitted_by_value" + + assert response.driver_output_resource_uri == "driver_output_resource_uri_value" + + assert response.driver_control_files_uri == "driver_control_files_uri_value" + + assert response.job_uuid == "job_uuid_value" + + assert response.done is True + + +def test_update_job_from_dict(): + test_update_job(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_job_async(transport: str = "grpc_asyncio"): + client = JobControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = jobs.UpdateJobRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + jobs.Job( + submitted_by="submitted_by_value", + driver_output_resource_uri="driver_output_resource_uri_value", + driver_control_files_uri="driver_control_files_uri_value", + job_uuid="job_uuid_value", + done=True, + ) + ) + + response = await client.update_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, jobs.Job) + + assert response.submitted_by == "submitted_by_value" + + assert response.driver_output_resource_uri == "driver_output_resource_uri_value" + + assert response.driver_control_files_uri == "driver_control_files_uri_value" + + assert response.job_uuid == "job_uuid_value" + + assert response.done is True + + +def test_cancel_job(transport: str = "grpc", request_type=jobs.CancelJobRequest): + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.cancel_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job( + submitted_by="submitted_by_value", + driver_output_resource_uri="driver_output_resource_uri_value", + driver_control_files_uri="driver_control_files_uri_value", + job_uuid="job_uuid_value", + done=True, + hadoop_job=jobs.HadoopJob(main_jar_file_uri="main_jar_file_uri_value"), + ) + + response = client.cancel_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == jobs.CancelJobRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, jobs.Job) + + assert response.submitted_by == "submitted_by_value" + + assert response.driver_output_resource_uri == "driver_output_resource_uri_value" + + assert response.driver_control_files_uri == "driver_control_files_uri_value" + + assert response.job_uuid == "job_uuid_value" + + assert response.done is True + + +def test_cancel_job_from_dict(): + test_cancel_job(request_type=dict) + + +@pytest.mark.asyncio +async def test_cancel_job_async(transport: str = "grpc_asyncio"): + client = JobControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = jobs.CancelJobRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.cancel_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + jobs.Job( + submitted_by="submitted_by_value", + driver_output_resource_uri="driver_output_resource_uri_value", + driver_control_files_uri="driver_control_files_uri_value", + job_uuid="job_uuid_value", + done=True, + ) + ) + + response = await client.cancel_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, jobs.Job) + + assert response.submitted_by == "submitted_by_value" + + assert response.driver_output_resource_uri == "driver_output_resource_uri_value" + + assert response.driver_control_files_uri == "driver_control_files_uri_value" + + assert response.job_uuid == "job_uuid_value" + + assert response.done is True + + +def test_cancel_job_flattened(): + client = JobControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.cancel_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.cancel_job( + project_id="project_id_value", region="region_value", job_id="job_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].job_id == "job_id_value" + + +def test_cancel_job_flattened_error(): + client = JobControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.cancel_job( + jobs.CancelJobRequest(), + project_id="project_id_value", + region="region_value", + job_id="job_id_value", + ) + + +@pytest.mark.asyncio +async def test_cancel_job_flattened_async(): + client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.cancel_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = jobs.Job() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(jobs.Job()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.cancel_job( + project_id="project_id_value", region="region_value", job_id="job_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].job_id == "job_id_value" + + +@pytest.mark.asyncio +async def test_cancel_job_flattened_error_async(): + client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.cancel_job( + jobs.CancelJobRequest(), + project_id="project_id_value", + region="region_value", + job_id="job_id_value", + ) + + +def test_delete_job(transport: str = "grpc", request_type=jobs.DeleteJobRequest): + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
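+    # DeleteJob has no meaningful response body, so the stub returns None and
+    # the client is expected to surface None as well.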
+ with mock.patch.object(type(client._transport.delete_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == jobs.DeleteJobRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_job_from_dict(): + test_delete_job(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_job_async(transport: str = "grpc_asyncio"): + client = JobControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = jobs.DeleteJobRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_job_flattened(): + client = JobControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client._transport.delete_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_job( + project_id="project_id_value", region="region_value", job_id="job_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].job_id == "job_id_value" + + +def test_delete_job_flattened_error(): + client = JobControllerClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_job( + jobs.DeleteJobRequest(), + project_id="project_id_value", + region="region_value", + job_id="job_id_value", + ) + + +@pytest.mark.asyncio +async def test_delete_job_flattened_async(): + client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.delete_job( + project_id="project_id_value", region="region_value", job_id="job_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].project_id == "project_id_value" + + assert args[0].region == "region_value" + + assert args[0].job_id == "job_id_value" + + +@pytest.mark.asyncio +async def test_delete_job_flattened_error_async(): + client = JobControllerAsyncClient(credentials=credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_job( + jobs.DeleteJobRequest(), + project_id="project_id_value", + region="region_value", + job_id="job_id_value", + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.JobControllerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.JobControllerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = JobControllerClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.JobControllerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = JobControllerClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.JobControllerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = JobControllerClient(transport=transport) + assert client._transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.JobControllerGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.JobControllerGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = JobControllerClient(credentials=credentials.AnonymousCredentials(),) + assert isinstance(client._transport, transports.JobControllerGrpcTransport,) + + +def test_job_controller_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.JobControllerTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_job_controller_base_transport(): + # Instantiate the base transport. 
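+    # The base transport defines the method surface but no wire
+    # implementation, so each RPC should raise NotImplementedError.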
+ with mock.patch( + "google.cloud.dataproc_v1beta2.services.job_controller.transports.JobControllerTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.JobControllerTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "submit_job", + "submit_job_as_operation", + "get_job", + "list_jobs", + "update_job", + "cancel_job", + "delete_job", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +def test_job_controller_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.dataproc_v1beta2.services.job_controller.transports.JobControllerTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.JobControllerTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_job_controller_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + JobControllerClient() + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +def test_job_controller_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.JobControllerGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_job_controller_host_no_port(): + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="dataproc.googleapis.com" + ), + ) + assert client._transport._host == "dataproc.googleapis.com:443" + + +def test_job_controller_host_with_port(): + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="dataproc.googleapis.com:8000" + ), + ) + assert client._transport._host == "dataproc.googleapis.com:8000" + + +def test_job_controller_grpc_transport_channel(): + channel = grpc.insecure_channel("http://localhost/") + + # Check that if channel is provided, mtls endpoint and client_cert_source + # won't be used. 
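+    # An explicitly supplied channel takes precedence: the callback below must
+    # never be invoked even though an mTLS endpoint is configured.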
+ callback = mock.MagicMock() + transport = transports.JobControllerGrpcTransport( + host="squid.clam.whelk", + channel=channel, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=callback, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert not callback.called + + +def test_job_controller_grpc_asyncio_transport_channel(): + channel = aio.insecure_channel("http://localhost/") + + # Check that if channel is provided, mtls endpoint and client_cert_source + # won't be used. + callback = mock.MagicMock() + transport = transports.JobControllerGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=callback, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert not callback.called + + +@mock.patch("grpc.ssl_channel_credentials", autospec=True) +@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True) +def test_job_controller_grpc_transport_channel_mtls_with_client_cert_source( + grpc_create_channel, grpc_ssl_channel_cred +): + # Check that if channel is None, but api_mtls_endpoint and client_cert_source + # are provided, then a mTLS channel will be created. + mock_cred = mock.Mock() + + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + transport = transports.JobControllerGrpcTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@mock.patch("grpc.ssl_channel_credentials", autospec=True) +@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True) +def test_job_controller_grpc_asyncio_transport_channel_mtls_with_client_cert_source( + grpc_create_channel, grpc_ssl_channel_cred +): + # Check that if channel is None, but api_mtls_endpoint and client_cert_source + # are provided, then a mTLS channel will be created. 
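+    # Same scenario as the synchronous transport above, but exercising the
+    # asyncio transport and grpc_helpers_async.create_channel.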
+ mock_cred = mock.Mock() + + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + transport = transports.JobControllerGrpcAsyncIOTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"] +) +@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True) +def test_job_controller_grpc_transport_channel_mtls_with_adc( + grpc_create_channel, api_mtls_endpoint +): + # Check that if channel and client_cert_source are None, but api_mtls_endpoint + # is provided, then a mTLS channel will be created with SSL ADC. + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + # Mock google.auth.transport.grpc.SslCredentials class. + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + mock_cred = mock.Mock() + transport = transports.JobControllerGrpcTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint=api_mtls_endpoint, + client_cert_source=None, + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"] +) +@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True) +def test_job_controller_grpc_asyncio_transport_channel_mtls_with_adc( + grpc_create_channel, api_mtls_endpoint +): + # Check that if channel and client_cert_source are None, but api_mtls_endpoint + # is provided, then a mTLS channel will be created with SSL ADC. + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + # Mock google.auth.transport.grpc.SslCredentials class. 
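+    # Patching SslCredentials lets the test stand in for application default
+    # SSL credentials without touching any real certificate material.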
+ mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + mock_cred = mock.Mock() + transport = transports.JobControllerGrpcAsyncIOTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint=api_mtls_endpoint, + client_cert_source=None, + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_job_controller_grpc_lro_client(): + client = JobControllerClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + transport = client._transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_job_controller_grpc_lro_async_client(): + client = JobControllerAsyncClient( + credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio", + ) + transport = client._client._transport + + # Ensure that we have a api-core operations client. + assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client diff --git a/tests/unit/gapic/dataproc_v1beta2/test_workflow_template_service.py b/tests/unit/gapic/dataproc_v1beta2/test_workflow_template_service.py new file mode 100644 index 00000000..07ea3560 --- /dev/null +++ b/tests/unit/gapic/dataproc_v1beta2/test_workflow_template_service.py @@ -0,0 +1,2488 @@ +# -*- coding: utf-8 -*- + +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from google import auth +from google.api_core import client_options +from google.api_core import exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation_async +from google.api_core import operations_v1 +from google.auth import credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.dataproc_v1beta2.services.workflow_template_service import ( + WorkflowTemplateServiceAsyncClient, +) +from google.cloud.dataproc_v1beta2.services.workflow_template_service import ( + WorkflowTemplateServiceClient, +) +from google.cloud.dataproc_v1beta2.services.workflow_template_service import pagers +from google.cloud.dataproc_v1beta2.services.workflow_template_service import transports +from google.cloud.dataproc_v1beta2.types import clusters +from google.cloud.dataproc_v1beta2.types import clusters as gcd_clusters +from google.cloud.dataproc_v1beta2.types import jobs +from google.cloud.dataproc_v1beta2.types import jobs as gcd_jobs +from google.cloud.dataproc_v1beta2.types import shared +from google.cloud.dataproc_v1beta2.types import workflow_templates +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import duration_pb2 as duration # type: ignore +from google.protobuf import timestamp_pb2 as timestamp # type: ignore + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert WorkflowTemplateServiceClient._get_default_mtls_endpoint(None) is None + assert ( + WorkflowTemplateServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + WorkflowTemplateServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + WorkflowTemplateServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + WorkflowTemplateServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + WorkflowTemplateServiceClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class", [WorkflowTemplateServiceClient, WorkflowTemplateServiceAsyncClient] +) +def test_workflow_template_service_client_from_service_account_file(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client._transport._credentials == creds + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client._transport._credentials == creds + + assert client._transport._host == "dataproc.googleapis.com:443" + + +def test_workflow_template_service_client_get_transport_class(): + transport = WorkflowTemplateServiceClient.get_transport_class() + assert transport == transports.WorkflowTemplateServiceGrpcTransport + + transport = WorkflowTemplateServiceClient.get_transport_class("grpc") + assert transport == transports.WorkflowTemplateServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + WorkflowTemplateServiceClient, + transports.WorkflowTemplateServiceGrpcTransport, + "grpc", + ), + ( + WorkflowTemplateServiceAsyncClient, + transports.WorkflowTemplateServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + WorkflowTemplateServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(WorkflowTemplateServiceClient), +) +@mock.patch.object( + WorkflowTemplateServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(WorkflowTemplateServiceAsyncClient), +) +def test_workflow_template_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(WorkflowTemplateServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(WorkflowTemplateServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
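+    # The user-supplied endpoint should be handed to the transport verbatim and
+    # reused as the api_mtls_endpoint.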
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + api_mtls_endpoint="squid.clam.whelk", + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", and client_cert_source is provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=client_cert_source_callback, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", and default_client_cert_source is provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is + # "auto", but client_cert_source and default_client_cert_source are None. 
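+    # With no client certificate available, mTLS cannot be used, so the client
+    # should fall back to the regular DEFAULT_ENDPOINT.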
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "auto"}): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + WorkflowTemplateServiceClient, + transports.WorkflowTemplateServiceGrpcTransport, + "grpc", + ), + ( + WorkflowTemplateServiceAsyncClient, + transports.WorkflowTemplateServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_workflow_template_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions(scopes=["1", "2"],) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + WorkflowTemplateServiceClient, + transports.WorkflowTemplateServiceGrpcTransport, + "grpc", + ), + ( + WorkflowTemplateServiceAsyncClient, + transports.WorkflowTemplateServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_workflow_template_service_client_client_options_credentials_file( + client_class, transport_class, transport_name +): + # Check the case credentials file is provided. 
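+    # The credentials_file path is expected to be forwarded to the transport
+    # untouched; no credentials object is constructed at this point.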
+ options = client_options.ClientOptions(credentials_file="credentials.json") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + api_mtls_endpoint=client.DEFAULT_ENDPOINT, + client_cert_source=None, + quota_project_id=None, + ) + + +def test_workflow_template_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.dataproc_v1beta2.services.workflow_template_service.transports.WorkflowTemplateServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = WorkflowTemplateServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + api_mtls_endpoint="squid.clam.whelk", + client_cert_source=None, + quota_project_id=None, + ) + + +def test_create_workflow_template( + transport: str = "grpc", + request_type=workflow_templates.CreateWorkflowTemplateRequest, +): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate( + id="id_value", name="name_value", version=774, + ) + + response = client.create_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == workflow_templates.CreateWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, workflow_templates.WorkflowTemplate) + + assert response.id == "id_value" + + assert response.name == "name_value" + + assert response.version == 774 + + +def test_create_workflow_template_from_dict(): + test_create_workflow_template(request_type=dict) + + +@pytest.mark.asyncio +async def test_create_workflow_template_async(transport: str = "grpc_asyncio"): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = workflow_templates.CreateWorkflowTemplateRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + workflow_templates.WorkflowTemplate( + id="id_value", name="name_value", version=774, + ) + ) + + response = await client.create_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, workflow_templates.WorkflowTemplate) + + assert response.id == "id_value" + + assert response.name == "name_value" + + assert response.version == 774 + + +def test_create_workflow_template_field_headers(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.CreateWorkflowTemplateRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_workflow_template), "__call__" + ) as call: + call.return_value = workflow_templates.WorkflowTemplate() + + client.create_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_workflow_template_field_headers_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.CreateWorkflowTemplateRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_workflow_template), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + workflow_templates.WorkflowTemplate() + ) + + await client.create_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_workflow_template_flattened(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.create_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_workflow_template( + parent="parent_value", + template=workflow_templates.WorkflowTemplate(id="id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
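+        # The flattened parent/template keyword arguments should have been packed
+        # into a single CreateWorkflowTemplateRequest before reaching the transport.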
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].template == workflow_templates.WorkflowTemplate(id="id_value") + + +def test_create_workflow_template_flattened_error(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_workflow_template( + workflow_templates.CreateWorkflowTemplateRequest(), + parent="parent_value", + template=workflow_templates.WorkflowTemplate(id="id_value"), + ) + + +@pytest.mark.asyncio +async def test_create_workflow_template_flattened_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.create_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + workflow_templates.WorkflowTemplate() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_workflow_template( + parent="parent_value", + template=workflow_templates.WorkflowTemplate(id="id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].template == workflow_templates.WorkflowTemplate(id="id_value") + + +@pytest.mark.asyncio +async def test_create_workflow_template_flattened_error_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_workflow_template( + workflow_templates.CreateWorkflowTemplateRequest(), + parent="parent_value", + template=workflow_templates.WorkflowTemplate(id="id_value"), + ) + + +def test_get_workflow_template( + transport: str = "grpc", request_type=workflow_templates.GetWorkflowTemplateRequest +): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate( + id="id_value", name="name_value", version=774, + ) + + response = client.get_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == workflow_templates.GetWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, workflow_templates.WorkflowTemplate) + + assert response.id == "id_value" + + assert response.name == "name_value" + + assert response.version == 774 + + +def test_get_workflow_template_from_dict(): + test_get_workflow_template(request_type=dict) + + +@pytest.mark.asyncio +async def test_get_workflow_template_async(transport: str = "grpc_asyncio"): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = workflow_templates.GetWorkflowTemplateRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + workflow_templates.WorkflowTemplate( + id="id_value", name="name_value", version=774, + ) + ) + + response = await client.get_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, workflow_templates.WorkflowTemplate) + + assert response.id == "id_value" + + assert response.name == "name_value" + + assert response.version == 774 + + +def test_get_workflow_template_field_headers(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.GetWorkflowTemplateRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_workflow_template), "__call__" + ) as call: + call.return_value = workflow_templates.WorkflowTemplate() + + client.get_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_workflow_template_field_headers_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.GetWorkflowTemplateRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_workflow_template), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + workflow_templates.WorkflowTemplate() + ) + + await client.get_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_workflow_template_flattened(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.get_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_workflow_template(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_get_workflow_template_flattened_error(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_workflow_template( + workflow_templates.GetWorkflowTemplateRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_workflow_template_flattened_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.get_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + workflow_templates.WorkflowTemplate() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_workflow_template(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_workflow_template_flattened_error_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_workflow_template( + workflow_templates.GetWorkflowTemplateRequest(), name="name_value", + ) + + +def test_instantiate_workflow_template( + transport: str = "grpc", + request_type=workflow_templates.InstantiateWorkflowTemplateRequest, +): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.instantiate_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
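+        # InstantiateWorkflowTemplate is a long-running operation, so the stub
+        # returns an Operation message rather than a WorkflowTemplate.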
+ call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.instantiate_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == workflow_templates.InstantiateWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_instantiate_workflow_template_from_dict(): + test_instantiate_workflow_template(request_type=dict) + + +@pytest.mark.asyncio +async def test_instantiate_workflow_template_async(transport: str = "grpc_asyncio"): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = workflow_templates.InstantiateWorkflowTemplateRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.instantiate_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.instantiate_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_instantiate_workflow_template_field_headers(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.InstantiateWorkflowTemplateRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.instantiate_workflow_template), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.instantiate_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_instantiate_workflow_template_field_headers_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.InstantiateWorkflowTemplateRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.instantiate_workflow_template), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.instantiate_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_instantiate_workflow_template_flattened(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.instantiate_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.instantiate_workflow_template( + name="name_value", parameters={"key_value": "value_value"}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].parameters == {"key_value": "value_value"} + + +def test_instantiate_workflow_template_flattened_error(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.instantiate_workflow_template( + workflow_templates.InstantiateWorkflowTemplateRequest(), + name="name_value", + parameters={"key_value": "value_value"}, + ) + + +@pytest.mark.asyncio +async def test_instantiate_workflow_template_flattened_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.instantiate_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.instantiate_workflow_template( + name="name_value", parameters={"key_value": "value_value"}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + assert args[0].parameters == {"key_value": "value_value"} + + +@pytest.mark.asyncio +async def test_instantiate_workflow_template_flattened_error_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
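+    # The ambiguous call should be rejected with a ValueError before any RPC is made.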
+ with pytest.raises(ValueError): + await client.instantiate_workflow_template( + workflow_templates.InstantiateWorkflowTemplateRequest(), + name="name_value", + parameters={"key_value": "value_value"}, + ) + + +def test_instantiate_inline_workflow_template( + transport: str = "grpc", + request_type=workflow_templates.InstantiateInlineWorkflowTemplateRequest, +): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.instantiate_inline_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + + response = client.instantiate_inline_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == workflow_templates.InstantiateInlineWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_instantiate_inline_workflow_template_from_dict(): + test_instantiate_inline_workflow_template(request_type=dict) + + +@pytest.mark.asyncio +async def test_instantiate_inline_workflow_template_async( + transport: str = "grpc_asyncio", +): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = workflow_templates.InstantiateInlineWorkflowTemplateRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.instantiate_inline_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + + response = await client.instantiate_inline_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_instantiate_inline_workflow_template_field_headers(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.InstantiateInlineWorkflowTemplateRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.instantiate_inline_workflow_template), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + + client.instantiate_inline_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_instantiate_inline_workflow_template_field_headers_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.InstantiateInlineWorkflowTemplateRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.instantiate_inline_workflow_template), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + + await client.instantiate_inline_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_instantiate_inline_workflow_template_flattened(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.instantiate_inline_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.instantiate_inline_workflow_template( + parent="parent_value", + template=workflow_templates.WorkflowTemplate(id="id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].template == workflow_templates.WorkflowTemplate(id="id_value") + + +def test_instantiate_inline_workflow_template_flattened_error(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.instantiate_inline_workflow_template( + workflow_templates.InstantiateInlineWorkflowTemplateRequest(), + parent="parent_value", + template=workflow_templates.WorkflowTemplate(id="id_value"), + ) + + +@pytest.mark.asyncio +async def test_instantiate_inline_workflow_template_flattened_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.instantiate_inline_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.instantiate_inline_workflow_template( + parent="parent_value", + template=workflow_templates.WorkflowTemplate(id="id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + assert args[0].template == workflow_templates.WorkflowTemplate(id="id_value") + + +@pytest.mark.asyncio +async def test_instantiate_inline_workflow_template_flattened_error_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.instantiate_inline_workflow_template( + workflow_templates.InstantiateInlineWorkflowTemplateRequest(), + parent="parent_value", + template=workflow_templates.WorkflowTemplate(id="id_value"), + ) + + +def test_update_workflow_template( + transport: str = "grpc", + request_type=workflow_templates.UpdateWorkflowTemplateRequest, +): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.update_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate( + id="id_value", name="name_value", version=774, + ) + + response = client.update_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == workflow_templates.UpdateWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, workflow_templates.WorkflowTemplate) + + assert response.id == "id_value" + + assert response.name == "name_value" + + assert response.version == 774 + + +def test_update_workflow_template_from_dict(): + test_update_workflow_template(request_type=dict) + + +@pytest.mark.asyncio +async def test_update_workflow_template_async(transport: str = "grpc_asyncio"): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = workflow_templates.UpdateWorkflowTemplateRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + workflow_templates.WorkflowTemplate( + id="id_value", name="name_value", version=774, + ) + ) + + response = await client.update_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, workflow_templates.WorkflowTemplate) + + assert response.id == "id_value" + + assert response.name == "name_value" + + assert response.version == 774 + + +def test_update_workflow_template_field_headers(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.UpdateWorkflowTemplateRequest() + request.template.name = "template.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.update_workflow_template), "__call__" + ) as call: + call.return_value = workflow_templates.WorkflowTemplate() + + client.update_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "template.name=template.name/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_update_workflow_template_field_headers_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.UpdateWorkflowTemplateRequest() + request.template.name = "template.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_workflow_template), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + workflow_templates.WorkflowTemplate() + ) + + await client.update_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "template.name=template.name/value",) in kw[ + "metadata" + ] + + +def test_update_workflow_template_flattened(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.update_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_workflow_template( + template=workflow_templates.WorkflowTemplate(id="id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
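+        # Only template is exposed as a flattened field for UpdateWorkflowTemplate,
+        # so it should be the value carried by the request.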
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].template == workflow_templates.WorkflowTemplate(id="id_value") + + +def test_update_workflow_template_flattened_error(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_workflow_template( + workflow_templates.UpdateWorkflowTemplateRequest(), + template=workflow_templates.WorkflowTemplate(id="id_value"), + ) + + +@pytest.mark.asyncio +async def test_update_workflow_template_flattened_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.update_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.WorkflowTemplate() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + workflow_templates.WorkflowTemplate() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_workflow_template( + template=workflow_templates.WorkflowTemplate(id="id_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].template == workflow_templates.WorkflowTemplate(id="id_value") + + +@pytest.mark.asyncio +async def test_update_workflow_template_flattened_error_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_workflow_template( + workflow_templates.UpdateWorkflowTemplateRequest(), + template=workflow_templates.WorkflowTemplate(id="id_value"), + ) + + +def test_list_workflow_templates( + transport: str = "grpc", + request_type=workflow_templates.ListWorkflowTemplatesRequest, +): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_workflow_templates), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.ListWorkflowTemplatesResponse( + next_page_token="next_page_token_value", + ) + + response = client.list_workflow_templates(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == workflow_templates.ListWorkflowTemplatesRequest() + + # Establish that the response is the type that we expect. 
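+    # list_workflow_templates wraps the response in a pager that fetches further
+    # pages lazily during iteration.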
+ assert isinstance(response, pagers.ListWorkflowTemplatesPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_workflow_templates_from_dict(): + test_list_workflow_templates(request_type=dict) + + +@pytest.mark.asyncio +async def test_list_workflow_templates_async(transport: str = "grpc_asyncio"): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = workflow_templates.ListWorkflowTemplatesRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_workflow_templates), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + workflow_templates.ListWorkflowTemplatesResponse( + next_page_token="next_page_token_value", + ) + ) + + response = await client.list_workflow_templates(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListWorkflowTemplatesAsyncPager) + + assert response.next_page_token == "next_page_token_value" + + +def test_list_workflow_templates_field_headers(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.ListWorkflowTemplatesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_workflow_templates), "__call__" + ) as call: + call.return_value = workflow_templates.ListWorkflowTemplatesResponse() + + client.list_workflow_templates(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_workflow_templates_field_headers_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.ListWorkflowTemplatesRequest() + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_workflow_templates), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + workflow_templates.ListWorkflowTemplatesResponse() + ) + + await client.list_workflow_templates(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_list_workflow_templates_flattened(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_workflow_templates), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.ListWorkflowTemplatesResponse() + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_workflow_templates(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +def test_list_workflow_templates_flattened_error(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_workflow_templates( + workflow_templates.ListWorkflowTemplatesRequest(), parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_workflow_templates_flattened_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_workflow_templates), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = workflow_templates.ListWorkflowTemplatesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + workflow_templates.ListWorkflowTemplatesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_workflow_templates(parent="parent_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].parent == "parent_value" + + +@pytest.mark.asyncio +async def test_list_workflow_templates_flattened_error_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_workflow_templates( + workflow_templates.ListWorkflowTemplatesRequest(), parent="parent_value", + ) + + +def test_list_workflow_templates_pager(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_workflow_templates), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + workflow_templates.ListWorkflowTemplatesResponse( + templates=[ + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + ], + next_page_token="abc", + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[], next_page_token="def", + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[workflow_templates.WorkflowTemplate(),], + next_page_token="ghi", + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[ + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_workflow_templates(request={}) + + assert pager._metadata == metadata + + results = [i for i in pager] + assert len(results) == 6 + assert all(isinstance(i, workflow_templates.WorkflowTemplate) for i in results) + + +def test_list_workflow_templates_pages(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.list_workflow_templates), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + workflow_templates.ListWorkflowTemplatesResponse( + templates=[ + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + ], + next_page_token="abc", + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[], next_page_token="def", + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[workflow_templates.WorkflowTemplate(),], + next_page_token="ghi", + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[ + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + ], + ), + RuntimeError, + ) + pages = list(client.list_workflow_templates(request={}).pages) + for page, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_workflow_templates_async_pager(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_workflow_templates), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
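+        # Same page layout as the sync pager test above; here the templates are
+        # consumed with "async for" over the AsyncPager.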
+ call.side_effect = ( + workflow_templates.ListWorkflowTemplatesResponse( + templates=[ + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + ], + next_page_token="abc", + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[], next_page_token="def", + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[workflow_templates.WorkflowTemplate(),], + next_page_token="ghi", + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[ + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_workflow_templates(request={},) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all( + isinstance(i, workflow_templates.WorkflowTemplate) for i in responses + ) + + +@pytest.mark.asyncio +async def test_list_workflow_templates_async_pages(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.list_workflow_templates), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + workflow_templates.ListWorkflowTemplatesResponse( + templates=[ + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + ], + next_page_token="abc", + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[], next_page_token="def", + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[workflow_templates.WorkflowTemplate(),], + next_page_token="ghi", + ), + workflow_templates.ListWorkflowTemplatesResponse( + templates=[ + workflow_templates.WorkflowTemplate(), + workflow_templates.WorkflowTemplate(), + ], + ), + RuntimeError, + ) + pages = [] + async for page in (await client.list_workflow_templates(request={})).pages: + pages.append(page) + for page, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page.raw_page.next_page_token == token + + +def test_delete_workflow_template( + transport: str = "grpc", + request_type=workflow_templates.DeleteWorkflowTemplateRequest, +): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == workflow_templates.DeleteWorkflowTemplateRequest() + + # Establish that the response is the type that we expect. 
+ assert response is None + + +def test_delete_workflow_template_from_dict(): + test_delete_workflow_template(request_type=dict) + + +@pytest.mark.asyncio +async def test_delete_workflow_template_async(transport: str = "grpc_asyncio"): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = workflow_templates.DeleteWorkflowTemplateRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + response = await client.delete_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_workflow_template_field_headers(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.DeleteWorkflowTemplateRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_workflow_template), "__call__" + ) as call: + call.return_value = None + + client.delete_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_workflow_template_field_headers_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = workflow_templates.DeleteWorkflowTemplateRequest() + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_workflow_template), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + + await client.delete_workflow_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_workflow_template_flattened(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._transport.delete_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = None + + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_workflow_template(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +def test_delete_workflow_template_flattened_error(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_workflow_template( + workflow_templates.DeleteWorkflowTemplateRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_workflow_template_flattened_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client._client._transport.delete_workflow_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_workflow_template(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_workflow_template_flattened_error_async(): + client = WorkflowTemplateServiceAsyncClient( + credentials=credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_workflow_template( + workflow_templates.DeleteWorkflowTemplateRequest(), name="name_value", + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.WorkflowTemplateServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.WorkflowTemplateServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = WorkflowTemplateServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.WorkflowTemplateServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = WorkflowTemplateServiceClient( + client_options={"scopes": ["1", "2"]}, transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
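+ # When a transport instance is supplied, credentials, credentials_file, and
+ # scopes must not also be passed (see test_credentials_transport_error above).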
+ transport = transports.WorkflowTemplateServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + client = WorkflowTemplateServiceClient(transport=transport) + assert client._transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.WorkflowTemplateServiceGrpcTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.WorkflowTemplateServiceGrpcAsyncIOTransport( + credentials=credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + ) + assert isinstance( + client._transport, transports.WorkflowTemplateServiceGrpcTransport, + ) + + +def test_workflow_template_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(exceptions.DuplicateCredentialArgs): + transport = transports.WorkflowTemplateServiceTransport( + credentials=credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_workflow_template_service_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.dataproc_v1beta2.services.workflow_template_service.transports.WorkflowTemplateServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.WorkflowTemplateServiceTransport( + credentials=credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "create_workflow_template", + "get_workflow_template", + "instantiate_workflow_template", + "instantiate_inline_workflow_template", + "update_workflow_template", + "list_workflow_templates", + "delete_workflow_template", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + +def test_workflow_template_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.dataproc_v1beta2.services.workflow_template_service.transports.WorkflowTemplateServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.WorkflowTemplateServiceTransport( + credentials_file="credentials.json", quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_workflow_template_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
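+ # ADC (Application Default Credentials) are resolved via google.auth.default(),
+ # which is patched here so no real credential lookup happens.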
+ with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + WorkflowTemplateServiceClient() + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +def test_workflow_template_service_transport_auth_adc(): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transports.WorkflowTemplateServiceGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) + adc.assert_called_once_with( + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_workflow_template_service_host_no_port(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="dataproc.googleapis.com" + ), + ) + assert client._transport._host == "dataproc.googleapis.com:443" + + +def test_workflow_template_service_host_with_port(): + client = WorkflowTemplateServiceClient( + credentials=credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="dataproc.googleapis.com:8000" + ), + ) + assert client._transport._host == "dataproc.googleapis.com:8000" + + +def test_workflow_template_service_grpc_transport_channel(): + channel = grpc.insecure_channel("http://localhost/") + + # Check that if channel is provided, mtls endpoint and client_cert_source + # won't be used. + callback = mock.MagicMock() + transport = transports.WorkflowTemplateServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=callback, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert not callback.called + + +def test_workflow_template_service_grpc_asyncio_transport_channel(): + channel = aio.insecure_channel("http://localhost/") + + # Check that if channel is provided, mtls endpoint and client_cert_source + # won't be used. + callback = mock.MagicMock() + transport = transports.WorkflowTemplateServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=callback, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert not callback.called + + +@mock.patch("grpc.ssl_channel_credentials", autospec=True) +@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True) +def test_workflow_template_service_grpc_transport_channel_mtls_with_client_cert_source( + grpc_create_channel, grpc_ssl_channel_cred +): + # Check that if channel is None, but api_mtls_endpoint and client_cert_source + # are provided, then a mTLS channel will be created. 
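+ # client_cert_source_callback is presumably the helper defined earlier in this
+ # test module; the ssl_channel_credentials assertion below expects it to
+ # supply b"cert bytes" and b"key bytes".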
+ mock_cred = mock.Mock() + + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + transport = transports.WorkflowTemplateServiceGrpcTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@mock.patch("grpc.ssl_channel_credentials", autospec=True) +@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True) +def test_workflow_template_service_grpc_asyncio_transport_channel_mtls_with_client_cert_source( + grpc_create_channel, grpc_ssl_channel_cred +): + # Check that if channel is None, but api_mtls_endpoint and client_cert_source + # are provided, then a mTLS channel will be created. + mock_cred = mock.Mock() + + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + transport = transports.WorkflowTemplateServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel + + +@pytest.mark.parametrize( + "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"] +) +@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True) +def test_workflow_template_service_grpc_transport_channel_mtls_with_adc( + grpc_create_channel, api_mtls_endpoint +): + # Check that if channel and client_cert_source are None, but api_mtls_endpoint + # is provided, then a mTLS channel will be created with SSL ADC. + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + # Mock google.auth.transport.grpc.SslCredentials class. 
+ mock_ssl_cred = mock.Mock()
+ with mock.patch.multiple(
+ "google.auth.transport.grpc.SslCredentials",
+ __init__=mock.Mock(return_value=None),
+ ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+ ):
+ mock_cred = mock.Mock()
+ transport = transports.WorkflowTemplateServiceGrpcTransport(
+ host="squid.clam.whelk",
+ credentials=mock_cred,
+ api_mtls_endpoint=api_mtls_endpoint,
+ client_cert_source=None,
+ )
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=mock_cred,
+ credentials_file=None,
+ scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+
+
+@pytest.mark.parametrize(
+ "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"]
+)
+@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True)
+def test_workflow_template_service_grpc_asyncio_transport_channel_mtls_with_adc(
+ grpc_create_channel, api_mtls_endpoint
+):
+ # Check that if channel and client_cert_source are None, but api_mtls_endpoint
+ # is provided, then a mTLS channel will be created with SSL ADC.
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+
+ # Mock google.auth.transport.grpc.SslCredentials class.
+ mock_ssl_cred = mock.Mock()
+ with mock.patch.multiple(
+ "google.auth.transport.grpc.SslCredentials",
+ __init__=mock.Mock(return_value=None),
+ ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+ ):
+ mock_cred = mock.Mock()
+ transport = transports.WorkflowTemplateServiceGrpcAsyncIOTransport(
+ host="squid.clam.whelk",
+ credentials=mock_cred,
+ api_mtls_endpoint=api_mtls_endpoint,
+ client_cert_source=None,
+ )
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=mock_cred,
+ credentials_file=None,
+ scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_workflow_template_service_grpc_lro_client():
+ client = WorkflowTemplateServiceClient(
+ credentials=credentials.AnonymousCredentials(), transport="grpc",
+ )
+ transport = client._transport
+
+ # Ensure that we have an api-core operations client.
+ assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_workflow_template_service_grpc_lro_async_client():
+ client = WorkflowTemplateServiceAsyncClient(
+ credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio",
+ )
+ transport = client._client._transport
+
+ # Ensure that we have an api-core operations client.
+ assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client + + +def test_workflow_template_path(): + project = "squid" + region = "clam" + workflow_template = "whelk" + + expected = "projects/{project}/regions/{region}/workflowTemplates/{workflow_template}".format( + project=project, region=region, workflow_template=workflow_template, + ) + actual = WorkflowTemplateServiceClient.workflow_template_path( + project, region, workflow_template + ) + assert expected == actual + + +def test_parse_workflow_template_path(): + expected = { + "project": "octopus", + "region": "oyster", + "workflow_template": "nudibranch", + } + path = WorkflowTemplateServiceClient.workflow_template_path(**expected) + + # Check that the path construction is reversible. + actual = WorkflowTemplateServiceClient.parse_workflow_template_path(path) + assert expected == actual diff --git a/tests/unit/gapic/v1/test_autoscaling_policy_service_client_v1.py b/tests/unit/gapic/v1/test_autoscaling_policy_service_client_v1.py deleted file mode 100644 index 83736be4..00000000 --- a/tests/unit/gapic/v1/test_autoscaling_policy_service_client_v1.py +++ /dev/null @@ -1,273 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unit tests.""" - -import mock -import pytest - -from google.cloud import dataproc_v1 -from google.cloud.dataproc_v1.proto import autoscaling_policies_pb2 -from google.protobuf import empty_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestAutoscalingPolicyServiceClient(object): - def test_create_autoscaling_policy(self): - # Setup Expected Response - id_ = "id3355" - name = "name3373707" - expected_response = {"id": id_, "name": name} - expected_response = autoscaling_policies_pb2.AutoscalingPolicy( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.AutoscalingPolicyServiceClient() - - # Setup Request - parent = client.region_path("[PROJECT]", "[REGION]") - policy = {} - - response = 
client.create_autoscaling_policy(parent, policy) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = autoscaling_policies_pb2.CreateAutoscalingPolicyRequest( - parent=parent, policy=policy - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_autoscaling_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.AutoscalingPolicyServiceClient() - - # Setup request - parent = client.region_path("[PROJECT]", "[REGION]") - policy = {} - - with pytest.raises(CustomException): - client.create_autoscaling_policy(parent, policy) - - def test_update_autoscaling_policy(self): - # Setup Expected Response - id_ = "id3355" - name = "name3373707" - expected_response = {"id": id_, "name": name} - expected_response = autoscaling_policies_pb2.AutoscalingPolicy( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.AutoscalingPolicyServiceClient() - - # Setup Request - policy = {} - - response = client.update_autoscaling_policy(policy) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = autoscaling_policies_pb2.UpdateAutoscalingPolicyRequest( - policy=policy - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_autoscaling_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.AutoscalingPolicyServiceClient() - - # Setup request - policy = {} - - with pytest.raises(CustomException): - client.update_autoscaling_policy(policy) - - def test_get_autoscaling_policy(self): - # Setup Expected Response - id_ = "id3355" - name_2 = "name2-1052831874" - expected_response = {"id": id_, "name": name_2} - expected_response = autoscaling_policies_pb2.AutoscalingPolicy( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.AutoscalingPolicyServiceClient() - - # Setup Request - name = "name3373707" - - response = client.get_autoscaling_policy(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = autoscaling_policies_pb2.GetAutoscalingPolicyRequest( - name=name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_autoscaling_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.AutoscalingPolicyServiceClient() - - # Setup request - name = "name3373707" - - with pytest.raises(CustomException): - client.get_autoscaling_policy(name) - - def 
test_list_autoscaling_policies(self): - # Setup Expected Response - next_page_token = "" - policies_element = {} - policies = [policies_element] - expected_response = {"next_page_token": next_page_token, "policies": policies} - expected_response = autoscaling_policies_pb2.ListAutoscalingPoliciesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.AutoscalingPolicyServiceClient() - - # Setup Request - parent = client.region_path("[PROJECT]", "[REGION]") - - paged_list_response = client.list_autoscaling_policies(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.policies[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = autoscaling_policies_pb2.ListAutoscalingPoliciesRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_autoscaling_policies_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.AutoscalingPolicyServiceClient() - - # Setup request - parent = client.region_path("[PROJECT]", "[REGION]") - - paged_list_response = client.list_autoscaling_policies(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_delete_autoscaling_policy(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.AutoscalingPolicyServiceClient() - - # Setup Request - name = "name3373707" - - client.delete_autoscaling_policy(name) - - assert len(channel.requests) == 1 - expected_request = autoscaling_policies_pb2.DeleteAutoscalingPolicyRequest( - name=name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_autoscaling_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.AutoscalingPolicyServiceClient() - - # Setup request - name = "name3373707" - - with pytest.raises(CustomException): - client.delete_autoscaling_policy(name) diff --git a/tests/unit/gapic/v1/test_cluster_controller_client_v1.py b/tests/unit/gapic/v1/test_cluster_controller_client_v1.py deleted file mode 100644 index 81591382..00000000 --- a/tests/unit/gapic/v1/test_cluster_controller_client_v1.py +++ /dev/null @@ -1,414 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unit tests.""" - -import mock -import pytest - -from google.rpc import status_pb2 - -from google.cloud import dataproc_v1 -from google.cloud.dataproc_v1.proto import clusters_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestClusterControllerClient(object): - def test_create_cluster(self): - # Setup Expected Response - project_id_2 = "projectId2939242356" - cluster_name = "clusterName-1018081872" - cluster_uuid = "clusterUuid-1017854240" - expected_response = { - "project_id": project_id_2, - "cluster_name": cluster_name, - "cluster_uuid": cluster_uuid, - } - expected_response = clusters_pb2.Cluster(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_cluster", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.ClusterControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - cluster = {} - - response = client.create_cluster(project_id, region, cluster) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = clusters_pb2.CreateClusterRequest( - project_id=project_id, region=region, cluster=cluster - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_cluster_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_cluster_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.ClusterControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - cluster = {} - - response = client.create_cluster(project_id, region, cluster) - exception = response.exception() - assert exception.errors[0] == error - - def test_update_cluster(self): - # Setup Expected Response - project_id_2 = "projectId2939242356" - cluster_name_2 = "clusterName2875867491" - cluster_uuid = "clusterUuid-1017854240" - expected_response = { - "project_id": project_id_2, - "cluster_name": cluster_name_2, - "cluster_uuid": cluster_uuid, - } - 
expected_response = clusters_pb2.Cluster(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_update_cluster", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.ClusterControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - cluster_name = "clusterName-1018081872" - cluster = {} - update_mask = {} - - response = client.update_cluster( - project_id, region, cluster_name, cluster, update_mask - ) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = clusters_pb2.UpdateClusterRequest( - project_id=project_id, - region=region, - cluster_name=cluster_name, - cluster=cluster, - update_mask=update_mask, - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_cluster_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_update_cluster_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.ClusterControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - cluster_name = "clusterName-1018081872" - cluster = {} - update_mask = {} - - response = client.update_cluster( - project_id, region, cluster_name, cluster, update_mask - ) - exception = response.exception() - assert exception.errors[0] == error - - def test_delete_cluster(self): - # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_delete_cluster", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.ClusterControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - cluster_name = "clusterName-1018081872" - - response = client.delete_cluster(project_id, region, cluster_name) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = clusters_pb2.DeleteClusterRequest( - project_id=project_id, region=region, cluster_name=cluster_name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_cluster_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_delete_cluster_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.ClusterControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - 
region = "region-934795532" - cluster_name = "clusterName-1018081872" - - response = client.delete_cluster(project_id, region, cluster_name) - exception = response.exception() - assert exception.errors[0] == error - - def test_diagnose_cluster(self): - # Setup Expected Response - output_uri = "outputUri-1273518802" - expected_response = {"output_uri": output_uri} - expected_response = clusters_pb2.DiagnoseClusterResults(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_diagnose_cluster", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.ClusterControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - cluster_name = "clusterName-1018081872" - - response = client.diagnose_cluster(project_id, region, cluster_name) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = clusters_pb2.DiagnoseClusterRequest( - project_id=project_id, region=region, cluster_name=cluster_name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_diagnose_cluster_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_diagnose_cluster_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.ClusterControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - cluster_name = "clusterName-1018081872" - - response = client.diagnose_cluster(project_id, region, cluster_name) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_cluster(self): - # Setup Expected Response - project_id_2 = "projectId2939242356" - cluster_name_2 = "clusterName2875867491" - cluster_uuid = "clusterUuid-1017854240" - expected_response = { - "project_id": project_id_2, - "cluster_name": cluster_name_2, - "cluster_uuid": cluster_uuid, - } - expected_response = clusters_pb2.Cluster(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.ClusterControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - cluster_name = "clusterName-1018081872" - - response = client.get_cluster(project_id, region, cluster_name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = clusters_pb2.GetClusterRequest( - project_id=project_id, region=region, cluster_name=cluster_name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_cluster_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = 
dataproc_v1.ClusterControllerClient() - - # Setup request - project_id = "projectId-1969970175" - region = "region-934795532" - cluster_name = "clusterName-1018081872" - - with pytest.raises(CustomException): - client.get_cluster(project_id, region, cluster_name) - - def test_list_clusters(self): - # Setup Expected Response - next_page_token = "" - clusters_element = {} - clusters = [clusters_element] - expected_response = {"next_page_token": next_page_token, "clusters": clusters} - expected_response = clusters_pb2.ListClustersResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.ClusterControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - - paged_list_response = client.list_clusters(project_id, region) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.clusters[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = clusters_pb2.ListClustersRequest( - project_id=project_id, region=region - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_clusters_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.ClusterControllerClient() - - # Setup request - project_id = "projectId-1969970175" - region = "region-934795532" - - paged_list_response = client.list_clusters(project_id, region) - with pytest.raises(CustomException): - list(paged_list_response) diff --git a/tests/unit/gapic/v1/test_job_controller_client_v1.py b/tests/unit/gapic/v1/test_job_controller_client_v1.py deleted file mode 100644 index 4d777faf..00000000 --- a/tests/unit/gapic/v1/test_job_controller_client_v1.py +++ /dev/null @@ -1,431 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unit tests.""" - -import mock -import pytest - -from google.rpc import status_pb2 - -from google.cloud import dataproc_v1 -from google.cloud.dataproc_v1.proto import jobs_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestJobControllerClient(object): - def test_submit_job(self): - # Setup Expected Response - driver_output_resource_uri = "driverOutputResourceUri-542229086" - driver_control_files_uri = "driverControlFilesUri207057643" - job_uuid = "jobUuid-1615012099" - done = True - expected_response = { - "driver_output_resource_uri": driver_output_resource_uri, - "driver_control_files_uri": driver_control_files_uri, - "job_uuid": job_uuid, - "done": done, - } - expected_response = jobs_pb2.Job(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.JobControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - job = {} - - response = client.submit_job(project_id, region, job) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = jobs_pb2.SubmitJobRequest( - project_id=project_id, region=region, job=job - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_submit_job_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.JobControllerClient() - - # Setup request - project_id = "projectId-1969970175" - region = "region-934795532" - job = {} - - with pytest.raises(CustomException): - client.submit_job(project_id, region, job) - - def test_submit_job_as_operation(self): - # Setup Expected Response - driver_output_resource_uri = "driverOutputResourceUri-542229086" - driver_control_files_uri = "driverControlFilesUri207057643" - job_uuid = "jobUuid-1615012099" - done = True - expected_response = { - "driver_output_resource_uri": driver_output_resource_uri, - "driver_control_files_uri": driver_control_files_uri, - "job_uuid": job_uuid, - "done": done, - } - expected_response = jobs_pb2.Job(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_submit_job_as_operation", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - 
channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.JobControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - job = {} - - response = client.submit_job_as_operation(project_id, region, job) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = jobs_pb2.SubmitJobRequest( - project_id=project_id, region=region, job=job - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_submit_job_as_operation_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_submit_job_as_operation_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.JobControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - job = {} - - response = client.submit_job_as_operation(project_id, region, job) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_job(self): - # Setup Expected Response - driver_output_resource_uri = "driverOutputResourceUri-542229086" - driver_control_files_uri = "driverControlFilesUri207057643" - job_uuid = "jobUuid-1615012099" - done = True - expected_response = { - "driver_output_resource_uri": driver_output_resource_uri, - "driver_control_files_uri": driver_control_files_uri, - "job_uuid": job_uuid, - "done": done, - } - expected_response = jobs_pb2.Job(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.JobControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - job_id = "jobId-1154752291" - - response = client.get_job(project_id, region, job_id) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = jobs_pb2.GetJobRequest( - project_id=project_id, region=region, job_id=job_id - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_job_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.JobControllerClient() - - # Setup request - project_id = "projectId-1969970175" - region = "region-934795532" - job_id = "jobId-1154752291" - - with pytest.raises(CustomException): - client.get_job(project_id, region, job_id) - - def test_list_jobs(self): - # Setup Expected Response - next_page_token = "" - jobs_element = {} - jobs = [jobs_element] - expected_response = {"next_page_token": next_page_token, "jobs": jobs} - expected_response = jobs_pb2.ListJobsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - 
with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.JobControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - - paged_list_response = client.list_jobs(project_id, region) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.jobs[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = jobs_pb2.ListJobsRequest( - project_id=project_id, region=region - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_jobs_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.JobControllerClient() - - # Setup request - project_id = "projectId-1969970175" - region = "region-934795532" - - paged_list_response = client.list_jobs(project_id, region) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_update_job(self): - # Setup Expected Response - driver_output_resource_uri = "driverOutputResourceUri-542229086" - driver_control_files_uri = "driverControlFilesUri207057643" - job_uuid = "jobUuid-1615012099" - done = True - expected_response = { - "driver_output_resource_uri": driver_output_resource_uri, - "driver_control_files_uri": driver_control_files_uri, - "job_uuid": job_uuid, - "done": done, - } - expected_response = jobs_pb2.Job(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.JobControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - job_id = "jobId-1154752291" - job = {} - update_mask = {} - - response = client.update_job(project_id, region, job_id, job, update_mask) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = jobs_pb2.UpdateJobRequest( - project_id=project_id, - region=region, - job_id=job_id, - job=job, - update_mask=update_mask, - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_job_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.JobControllerClient() - - # Setup request - project_id = "projectId-1969970175" - region = "region-934795532" - job_id = "jobId-1154752291" - job = {} - update_mask = {} - - with pytest.raises(CustomException): - client.update_job(project_id, region, job_id, job, update_mask) - - def test_cancel_job(self): - # Setup Expected Response - driver_output_resource_uri = "driverOutputResourceUri-542229086" - driver_control_files_uri = "driverControlFilesUri207057643" - job_uuid = "jobUuid-1615012099" - done = True - expected_response = { - "driver_output_resource_uri": driver_output_resource_uri, - "driver_control_files_uri": driver_control_files_uri, - "job_uuid": job_uuid, - "done": done, - } - expected_response = jobs_pb2.Job(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.JobControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - job_id = "jobId-1154752291" - - response = client.cancel_job(project_id, region, job_id) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = jobs_pb2.CancelJobRequest( - project_id=project_id, region=region, job_id=job_id - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_cancel_job_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.JobControllerClient() - - # Setup request - project_id = "projectId-1969970175" - region = "region-934795532" - job_id = "jobId-1154752291" - - with pytest.raises(CustomException): - client.cancel_job(project_id, region, job_id) - - def test_delete_job(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.JobControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - job_id = "jobId-1154752291" - - client.delete_job(project_id, region, job_id) - - assert len(channel.requests) == 1 - expected_request = jobs_pb2.DeleteJobRequest( - project_id=project_id, region=region, job_id=job_id - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_job_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.JobControllerClient() - - # Setup request - project_id = "projectId-1969970175" - region = "region-934795532" - job_id = "jobId-1154752291" - - with pytest.raises(CustomException): - client.delete_job(project_id, region, job_id) diff --git a/tests/unit/gapic/v1/test_workflow_template_service_client_v1.py b/tests/unit/gapic/v1/test_workflow_template_service_client_v1.py deleted file mode 100644 index c63831ca..00000000 --- a/tests/unit/gapic/v1/test_workflow_template_service_client_v1.py +++ /dev/null @@ -1,378 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unit tests.""" - -import mock -import pytest - -from google.rpc import status_pb2 - -from google.cloud import dataproc_v1 -from google.cloud.dataproc_v1.proto import workflow_templates_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestWorkflowTemplateServiceClient(object): - def test_instantiate_workflow_template(self): - # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_instantiate_workflow_template", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.WorkflowTemplateServiceClient() - - # Setup Request - name = "name3373707" - - response = client.instantiate_workflow_template(name) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = workflow_templates_pb2.InstantiateWorkflowTemplateRequest( - name=name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_instantiate_workflow_template_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_instantiate_workflow_template_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.WorkflowTemplateServiceClient() - - # Setup Request - name = "name3373707" - - response = client.instantiate_workflow_template(name) - exception = response.exception() - assert exception.errors[0] == error - - def test_instantiate_inline_workflow_template(self): - # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_instantiate_inline_workflow_template", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.WorkflowTemplateServiceClient() - - # Setup Request - parent = client.region_path("[PROJECT]", 
"[REGION]") - template = {} - - response = client.instantiate_inline_workflow_template(parent, template) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = workflow_templates_pb2.InstantiateInlineWorkflowTemplateRequest( - parent=parent, template=template - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_instantiate_inline_workflow_template_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_instantiate_inline_workflow_template_exception", - done=True, - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.WorkflowTemplateServiceClient() - - # Setup Request - parent = client.region_path("[PROJECT]", "[REGION]") - template = {} - - response = client.instantiate_inline_workflow_template(parent, template) - exception = response.exception() - assert exception.errors[0] == error - - def test_create_workflow_template(self): - # Setup Expected Response - id_ = "id3355" - name = "name3373707" - version = 351608024 - expected_response = {"id": id_, "name": name, "version": version} - expected_response = workflow_templates_pb2.WorkflowTemplate(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.WorkflowTemplateServiceClient() - - # Setup Request - parent = client.region_path("[PROJECT]", "[REGION]") - template = {} - - response = client.create_workflow_template(parent, template) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = workflow_templates_pb2.CreateWorkflowTemplateRequest( - parent=parent, template=template - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_workflow_template_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.WorkflowTemplateServiceClient() - - # Setup request - parent = client.region_path("[PROJECT]", "[REGION]") - template = {} - - with pytest.raises(CustomException): - client.create_workflow_template(parent, template) - - def test_get_workflow_template(self): - # Setup Expected Response - id_ = "id3355" - name_2 = "name2-1052831874" - version = 351608024 - expected_response = {"id": id_, "name": name_2, "version": version} - expected_response = workflow_templates_pb2.WorkflowTemplate(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.WorkflowTemplateServiceClient() - - # Setup Request - name = "name3373707" - - response = client.get_workflow_template(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = workflow_templates_pb2.GetWorkflowTemplateRequest(name=name) - 
actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_workflow_template_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.WorkflowTemplateServiceClient() - - # Setup request - name = "name3373707" - - with pytest.raises(CustomException): - client.get_workflow_template(name) - - def test_update_workflow_template(self): - # Setup Expected Response - id_ = "id3355" - name = "name3373707" - version = 351608024 - expected_response = {"id": id_, "name": name, "version": version} - expected_response = workflow_templates_pb2.WorkflowTemplate(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.WorkflowTemplateServiceClient() - - # Setup Request - template = {} - - response = client.update_workflow_template(template) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = workflow_templates_pb2.UpdateWorkflowTemplateRequest( - template=template - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_workflow_template_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.WorkflowTemplateServiceClient() - - # Setup request - template = {} - - with pytest.raises(CustomException): - client.update_workflow_template(template) - - def test_list_workflow_templates(self): - # Setup Expected Response - next_page_token = "" - templates_element = {} - templates = [templates_element] - expected_response = {"next_page_token": next_page_token, "templates": templates} - expected_response = workflow_templates_pb2.ListWorkflowTemplatesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.WorkflowTemplateServiceClient() - - # Setup Request - parent = client.region_path("[PROJECT]", "[REGION]") - - paged_list_response = client.list_workflow_templates(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.templates[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = workflow_templates_pb2.ListWorkflowTemplatesRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_workflow_templates_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.WorkflowTemplateServiceClient() - - # Setup request - parent = client.region_path("[PROJECT]", "[REGION]") - - paged_list_response = client.list_workflow_templates(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def 
test_delete_workflow_template(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.WorkflowTemplateServiceClient() - - # Setup Request - name = "name3373707" - - client.delete_workflow_template(name) - - assert len(channel.requests) == 1 - expected_request = workflow_templates_pb2.DeleteWorkflowTemplateRequest( - name=name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_workflow_template_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1.WorkflowTemplateServiceClient() - - # Setup request - name = "name3373707" - - with pytest.raises(CustomException): - client.delete_workflow_template(name) diff --git a/tests/unit/gapic/v1beta2/test_autoscaling_policy_service_client_v1beta2.py b/tests/unit/gapic/v1beta2/test_autoscaling_policy_service_client_v1beta2.py deleted file mode 100644 index 63127296..00000000 --- a/tests/unit/gapic/v1beta2/test_autoscaling_policy_service_client_v1beta2.py +++ /dev/null @@ -1,273 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unit tests.""" - -import mock -import pytest - -from google.cloud import dataproc_v1beta2 -from google.cloud.dataproc_v1beta2.proto import autoscaling_policies_pb2 -from google.protobuf import empty_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestAutoscalingPolicyServiceClient(object): - def test_create_autoscaling_policy(self): - # Setup Expected Response - id_ = "id3355" - name = "name3373707" - expected_response = {"id": id_, "name": name} - expected_response = autoscaling_policies_pb2.AutoscalingPolicy( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.AutoscalingPolicyServiceClient() - - # Setup Request - parent = client.region_path("[PROJECT]", "[REGION]") - policy = {} - - response = client.create_autoscaling_policy(parent, policy) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = autoscaling_policies_pb2.CreateAutoscalingPolicyRequest( - parent=parent, policy=policy - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_autoscaling_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.AutoscalingPolicyServiceClient() - - # Setup request - parent = client.region_path("[PROJECT]", "[REGION]") - policy = {} - - with pytest.raises(CustomException): - client.create_autoscaling_policy(parent, policy) - - def test_update_autoscaling_policy(self): - # Setup Expected Response - id_ = "id3355" - name = "name3373707" - expected_response = {"id": id_, "name": name} - expected_response = autoscaling_policies_pb2.AutoscalingPolicy( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.AutoscalingPolicyServiceClient() - - # Setup Request - policy = {} - - response = client.update_autoscaling_policy(policy) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = autoscaling_policies_pb2.UpdateAutoscalingPolicyRequest( - policy=policy - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def 
test_update_autoscaling_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.AutoscalingPolicyServiceClient() - - # Setup request - policy = {} - - with pytest.raises(CustomException): - client.update_autoscaling_policy(policy) - - def test_get_autoscaling_policy(self): - # Setup Expected Response - id_ = "id3355" - name_2 = "name2-1052831874" - expected_response = {"id": id_, "name": name_2} - expected_response = autoscaling_policies_pb2.AutoscalingPolicy( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.AutoscalingPolicyServiceClient() - - # Setup Request - name = "name3373707" - - response = client.get_autoscaling_policy(name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = autoscaling_policies_pb2.GetAutoscalingPolicyRequest( - name=name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_autoscaling_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.AutoscalingPolicyServiceClient() - - # Setup request - name = "name3373707" - - with pytest.raises(CustomException): - client.get_autoscaling_policy(name) - - def test_list_autoscaling_policies(self): - # Setup Expected Response - next_page_token = "" - policies_element = {} - policies = [policies_element] - expected_response = {"next_page_token": next_page_token, "policies": policies} - expected_response = autoscaling_policies_pb2.ListAutoscalingPoliciesResponse( - **expected_response - ) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.AutoscalingPolicyServiceClient() - - # Setup Request - parent = client.region_path("[PROJECT]", "[REGION]") - - paged_list_response = client.list_autoscaling_policies(parent) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.policies[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = autoscaling_policies_pb2.ListAutoscalingPoliciesRequest( - parent=parent - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_autoscaling_policies_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.AutoscalingPolicyServiceClient() - - # Setup request - parent = client.region_path("[PROJECT]", "[REGION]") - - paged_list_response = client.list_autoscaling_policies(parent) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_delete_autoscaling_policy(self): - channel = ChannelStub() - patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.AutoscalingPolicyServiceClient() - - # Setup Request - name = "name3373707" - - client.delete_autoscaling_policy(name) - - assert len(channel.requests) == 1 - expected_request = autoscaling_policies_pb2.DeleteAutoscalingPolicyRequest( - name=name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_autoscaling_policy_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.AutoscalingPolicyServiceClient() - - # Setup request - name = "name3373707" - - with pytest.raises(CustomException): - client.delete_autoscaling_policy(name) diff --git a/tests/unit/gapic/v1beta2/test_cluster_controller_client_v1beta2.py b/tests/unit/gapic/v1beta2/test_cluster_controller_client_v1beta2.py deleted file mode 100644 index cb4d14ad..00000000 --- a/tests/unit/gapic/v1beta2/test_cluster_controller_client_v1beta2.py +++ /dev/null @@ -1,413 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unit tests.""" - -import mock -import pytest - -from google.rpc import status_pb2 - -from google.cloud import dataproc_v1beta2 -from google.cloud.dataproc_v1beta2.proto import clusters_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestClusterControllerClient(object): - def test_create_cluster(self): - # Setup Expected Response - project_id_2 = "projectId2939242356" - cluster_name = "clusterName-1018081872" - cluster_uuid = "clusterUuid-1017854240" - expected_response = { - "project_id": project_id_2, - "cluster_name": cluster_name, - "cluster_uuid": cluster_uuid, - } - expected_response = clusters_pb2.Cluster(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_cluster", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.ClusterControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - cluster = {} - - response = client.create_cluster(project_id, region, cluster) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = clusters_pb2.CreateClusterRequest( - project_id=project_id, region=region, cluster=cluster - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_create_cluster_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_cluster_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.ClusterControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - cluster = {} - - response = client.create_cluster(project_id, region, cluster) - exception = response.exception() - assert exception.errors[0] == error - - def test_update_cluster(self): - # Setup Expected Response - project_id_2 = "projectId2939242356" - cluster_name_2 = "clusterName2875867491" - cluster_uuid = "clusterUuid-1017854240" - expected_response = { - "project_id": project_id_2, - "cluster_name": cluster_name_2, - "cluster_uuid": cluster_uuid, 
- } - expected_response = clusters_pb2.Cluster(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_update_cluster", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.ClusterControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - cluster_name = "clusterName-1018081872" - cluster = {} - update_mask = {} - - response = client.update_cluster( - project_id, region, cluster_name, cluster, update_mask - ) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = clusters_pb2.UpdateClusterRequest( - project_id=project_id, - region=region, - cluster_name=cluster_name, - cluster=cluster, - update_mask=update_mask, - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_cluster_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_update_cluster_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.ClusterControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - cluster_name = "clusterName-1018081872" - cluster = {} - update_mask = {} - - response = client.update_cluster( - project_id, region, cluster_name, cluster, update_mask - ) - exception = response.exception() - assert exception.errors[0] == error - - def test_delete_cluster(self): - # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_delete_cluster", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.ClusterControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - cluster_name = "clusterName-1018081872" - - response = client.delete_cluster(project_id, region, cluster_name) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = clusters_pb2.DeleteClusterRequest( - project_id=project_id, region=region, cluster_name=cluster_name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_cluster_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_delete_cluster_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.ClusterControllerClient() - - # Setup Request - project_id = 
"projectId-1969970175" - region = "region-934795532" - cluster_name = "clusterName-1018081872" - - response = client.delete_cluster(project_id, region, cluster_name) - exception = response.exception() - assert exception.errors[0] == error - - def test_diagnose_cluster(self): - # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_diagnose_cluster", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.ClusterControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - cluster_name = "clusterName-1018081872" - - response = client.diagnose_cluster(project_id, region, cluster_name) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = clusters_pb2.DiagnoseClusterRequest( - project_id=project_id, region=region, cluster_name=cluster_name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_diagnose_cluster_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_diagnose_cluster_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.ClusterControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - cluster_name = "clusterName-1018081872" - - response = client.diagnose_cluster(project_id, region, cluster_name) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_cluster(self): - # Setup Expected Response - project_id_2 = "projectId2939242356" - cluster_name_2 = "clusterName2875867491" - cluster_uuid = "clusterUuid-1017854240" - expected_response = { - "project_id": project_id_2, - "cluster_name": cluster_name_2, - "cluster_uuid": cluster_uuid, - } - expected_response = clusters_pb2.Cluster(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.ClusterControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - cluster_name = "clusterName-1018081872" - - response = client.get_cluster(project_id, region, cluster_name) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = clusters_pb2.GetClusterRequest( - project_id=project_id, region=region, cluster_name=cluster_name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_cluster_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.ClusterControllerClient() - 
- # Setup request - project_id = "projectId-1969970175" - region = "region-934795532" - cluster_name = "clusterName-1018081872" - - with pytest.raises(CustomException): - client.get_cluster(project_id, region, cluster_name) - - def test_list_clusters(self): - # Setup Expected Response - next_page_token = "" - clusters_element = {} - clusters = [clusters_element] - expected_response = {"next_page_token": next_page_token, "clusters": clusters} - expected_response = clusters_pb2.ListClustersResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.ClusterControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - - paged_list_response = client.list_clusters(project_id, region) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.clusters[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = clusters_pb2.ListClustersRequest( - project_id=project_id, region=region - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_clusters_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.ClusterControllerClient() - - # Setup request - project_id = "projectId-1969970175" - region = "region-934795532" - - paged_list_response = client.list_clusters(project_id, region) - with pytest.raises(CustomException): - list(paged_list_response) diff --git a/tests/unit/gapic/v1beta2/test_job_controller_client_v1beta2.py b/tests/unit/gapic/v1beta2/test_job_controller_client_v1beta2.py deleted file mode 100644 index 57dbcbe8..00000000 --- a/tests/unit/gapic/v1beta2/test_job_controller_client_v1beta2.py +++ /dev/null @@ -1,441 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Unit tests.""" - -import mock -import pytest - -from google.rpc import status_pb2 - -from google.cloud import dataproc_v1beta2 -from google.cloud.dataproc_v1beta2.proto import jobs_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestJobControllerClient(object): - def test_submit_job(self): - # Setup Expected Response - submitted_by = "submittedBy-2047729125" - driver_output_resource_uri = "driverOutputResourceUri-542229086" - driver_control_files_uri = "driverControlFilesUri207057643" - job_uuid = "jobUuid-1615012099" - done = True - expected_response = { - "submitted_by": submitted_by, - "driver_output_resource_uri": driver_output_resource_uri, - "driver_control_files_uri": driver_control_files_uri, - "job_uuid": job_uuid, - "done": done, - } - expected_response = jobs_pb2.Job(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.JobControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - job = {} - - response = client.submit_job(project_id, region, job) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = jobs_pb2.SubmitJobRequest( - project_id=project_id, region=region, job=job - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_submit_job_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.JobControllerClient() - - # Setup request - project_id = "projectId-1969970175" - region = "region-934795532" - job = {} - - with pytest.raises(CustomException): - client.submit_job(project_id, region, job) - - def test_submit_job_as_operation(self): - # Setup Expected Response - submitted_by = "submittedBy-2047729125" - driver_output_resource_uri = "driverOutputResourceUri-542229086" - driver_control_files_uri = "driverControlFilesUri207057643" - job_uuid = "jobUuid-1615012099" - done = True - expected_response = { - "submitted_by": submitted_by, - "driver_output_resource_uri": driver_output_resource_uri, - "driver_control_files_uri": driver_control_files_uri, - "job_uuid": job_uuid, - "done": done, - } - expected_response = jobs_pb2.Job(**expected_response) - operation 
= operations_pb2.Operation( - name="operations/test_submit_job_as_operation", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.JobControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - job = {} - - response = client.submit_job_as_operation(project_id, region, job) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = jobs_pb2.SubmitJobRequest( - project_id=project_id, region=region, job=job - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_submit_job_as_operation_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_submit_job_as_operation_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.JobControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - job = {} - - response = client.submit_job_as_operation(project_id, region, job) - exception = response.exception() - assert exception.errors[0] == error - - def test_get_job(self): - # Setup Expected Response - submitted_by = "submittedBy-2047729125" - driver_output_resource_uri = "driverOutputResourceUri-542229086" - driver_control_files_uri = "driverControlFilesUri207057643" - job_uuid = "jobUuid-1615012099" - done = True - expected_response = { - "submitted_by": submitted_by, - "driver_output_resource_uri": driver_output_resource_uri, - "driver_control_files_uri": driver_control_files_uri, - "job_uuid": job_uuid, - "done": done, - } - expected_response = jobs_pb2.Job(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.JobControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - job_id = "jobId-1154752291" - - response = client.get_job(project_id, region, job_id) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = jobs_pb2.GetJobRequest( - project_id=project_id, region=region, job_id=job_id - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_get_job_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.JobControllerClient() - - # Setup request - project_id = "projectId-1969970175" - region = "region-934795532" - job_id = "jobId-1154752291" - - with pytest.raises(CustomException): - client.get_job(project_id, region, job_id) - - def test_list_jobs(self): - # Setup Expected Response - next_page_token = "" - jobs_element = {} - jobs = [jobs_element] - expected_response = 
{"next_page_token": next_page_token, "jobs": jobs} - expected_response = jobs_pb2.ListJobsResponse(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.JobControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - - paged_list_response = client.list_jobs(project_id, region) - resources = list(paged_list_response) - assert len(resources) == 1 - - assert expected_response.jobs[0] == resources[0] - - assert len(channel.requests) == 1 - expected_request = jobs_pb2.ListJobsRequest( - project_id=project_id, region=region - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_list_jobs_exception(self): - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.JobControllerClient() - - # Setup request - project_id = "projectId-1969970175" - region = "region-934795532" - - paged_list_response = client.list_jobs(project_id, region) - with pytest.raises(CustomException): - list(paged_list_response) - - def test_update_job(self): - # Setup Expected Response - submitted_by = "submittedBy-2047729125" - driver_output_resource_uri = "driverOutputResourceUri-542229086" - driver_control_files_uri = "driverControlFilesUri207057643" - job_uuid = "jobUuid-1615012099" - done = True - expected_response = { - "submitted_by": submitted_by, - "driver_output_resource_uri": driver_output_resource_uri, - "driver_control_files_uri": driver_control_files_uri, - "job_uuid": job_uuid, - "done": done, - } - expected_response = jobs_pb2.Job(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.JobControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - job_id = "jobId-1154752291" - job = {} - update_mask = {} - - response = client.update_job(project_id, region, job_id, job, update_mask) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = jobs_pb2.UpdateJobRequest( - project_id=project_id, - region=region, - job_id=job_id, - job=job, - update_mask=update_mask, - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_job_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.JobControllerClient() - - # Setup request - project_id = "projectId-1969970175" - region = "region-934795532" - job_id = "jobId-1154752291" - job = {} - update_mask = {} - - with pytest.raises(CustomException): - client.update_job(project_id, region, job_id, job, update_mask) - - def test_cancel_job(self): - # Setup Expected Response - submitted_by = "submittedBy-2047729125" - driver_output_resource_uri = "driverOutputResourceUri-542229086" - driver_control_files_uri = "driverControlFilesUri207057643" - job_uuid = 
"jobUuid-1615012099" - done = True - expected_response = { - "submitted_by": submitted_by, - "driver_output_resource_uri": driver_output_resource_uri, - "driver_control_files_uri": driver_control_files_uri, - "job_uuid": job_uuid, - "done": done, - } - expected_response = jobs_pb2.Job(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.JobControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - job_id = "jobId-1154752291" - - response = client.cancel_job(project_id, region, job_id) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = jobs_pb2.CancelJobRequest( - project_id=project_id, region=region, job_id=job_id - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_cancel_job_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.JobControllerClient() - - # Setup request - project_id = "projectId-1969970175" - region = "region-934795532" - job_id = "jobId-1154752291" - - with pytest.raises(CustomException): - client.cancel_job(project_id, region, job_id) - - def test_delete_job(self): - channel = ChannelStub() - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.JobControllerClient() - - # Setup Request - project_id = "projectId-1969970175" - region = "region-934795532" - job_id = "jobId-1154752291" - - client.delete_job(project_id, region, job_id) - - assert len(channel.requests) == 1 - expected_request = jobs_pb2.DeleteJobRequest( - project_id=project_id, region=region, job_id=job_id - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_delete_job_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.JobControllerClient() - - # Setup request - project_id = "projectId-1969970175" - region = "region-934795532" - job_id = "jobId-1154752291" - - with pytest.raises(CustomException): - client.delete_job(project_id, region, job_id) diff --git a/tests/unit/gapic/v1beta2/test_workflow_template_service_client_v1beta2.py b/tests/unit/gapic/v1beta2/test_workflow_template_service_client_v1beta2.py deleted file mode 100644 index f8ff56f9..00000000 --- a/tests/unit/gapic/v1beta2/test_workflow_template_service_client_v1beta2.py +++ /dev/null @@ -1,378 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -"""Unit tests.""" - -import mock -import pytest - -from google.rpc import status_pb2 - -from google.cloud import dataproc_v1beta2 -from google.cloud.dataproc_v1beta2.proto import workflow_templates_pb2 -from google.longrunning import operations_pb2 -from google.protobuf import empty_pb2 - - -class MultiCallableStub(object): - """Stub for the grpc.UnaryUnaryMultiCallable interface.""" - - def __init__(self, method, channel_stub): - self.method = method - self.channel_stub = channel_stub - - def __call__(self, request, timeout=None, metadata=None, credentials=None): - self.channel_stub.requests.append((self.method, request)) - - response = None - if self.channel_stub.responses: - response = self.channel_stub.responses.pop() - - if isinstance(response, Exception): - raise response - - if response: - return response - - -class ChannelStub(object): - """Stub for the grpc.Channel interface.""" - - def __init__(self, responses=[]): - self.responses = responses - self.requests = [] - - def unary_unary(self, method, request_serializer=None, response_deserializer=None): - return MultiCallableStub(method, self) - - -class CustomException(Exception): - pass - - -class TestWorkflowTemplateServiceClient(object): - def test_instantiate_workflow_template(self): - # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_instantiate_workflow_template", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.WorkflowTemplateServiceClient() - - # Setup Request - name = "name3373707" - - response = client.instantiate_workflow_template(name) - result = response.result() - assert expected_response == result - - assert len(channel.requests) == 1 - expected_request = workflow_templates_pb2.InstantiateWorkflowTemplateRequest( - name=name - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_instantiate_workflow_template_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_instantiate_workflow_template_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = dataproc_v1beta2.WorkflowTemplateServiceClient() - - # Setup Request - name = "name3373707" - - response = client.instantiate_workflow_template(name) - exception = response.exception() - assert exception.errors[0] == error - - def test_instantiate_inline_workflow_template(self): - # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_instantiate_inline_workflow_template", done=True - ) - operation.response.Pack(expected_response) - - # Mock the API response - channel = ChannelStub(responses=[operation]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = 
channel
-            client = dataproc_v1beta2.WorkflowTemplateServiceClient()
-
-        # Setup Request
-        parent = client.region_path("[PROJECT]", "[REGION]")
-        template = {}
-
-        response = client.instantiate_inline_workflow_template(parent, template)
-        result = response.result()
-        assert expected_response == result
-
-        assert len(channel.requests) == 1
-        expected_request = workflow_templates_pb2.InstantiateInlineWorkflowTemplateRequest(
-            parent=parent, template=template
-        )
-        actual_request = channel.requests[0][1]
-        assert expected_request == actual_request
-
-    def test_instantiate_inline_workflow_template_exception(self):
-        # Setup Response
-        error = status_pb2.Status()
-        operation = operations_pb2.Operation(
-            name="operations/test_instantiate_inline_workflow_template_exception",
-            done=True,
-        )
-        operation.error.CopyFrom(error)
-
-        # Mock the API response
-        channel = ChannelStub(responses=[operation])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = dataproc_v1beta2.WorkflowTemplateServiceClient()
-
-        # Setup Request
-        parent = client.region_path("[PROJECT]", "[REGION]")
-        template = {}
-
-        response = client.instantiate_inline_workflow_template(parent, template)
-        exception = response.exception()
-        assert exception.errors[0] == error
-
-    def test_create_workflow_template(self):
-        # Setup Expected Response
-        id_ = "id3355"
-        name = "name3373707"
-        version = 351608024
-        expected_response = {"id": id_, "name": name, "version": version}
-        expected_response = workflow_templates_pb2.WorkflowTemplate(**expected_response)
-
-        # Mock the API response
-        channel = ChannelStub(responses=[expected_response])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = dataproc_v1beta2.WorkflowTemplateServiceClient()
-
-        # Setup Request
-        parent = client.region_path("[PROJECT]", "[REGION]")
-        template = {}
-
-        response = client.create_workflow_template(parent, template)
-        assert expected_response == response
-
-        assert len(channel.requests) == 1
-        expected_request = workflow_templates_pb2.CreateWorkflowTemplateRequest(
-            parent=parent, template=template
-        )
-        actual_request = channel.requests[0][1]
-        assert expected_request == actual_request
-
-    def test_create_workflow_template_exception(self):
-        # Mock the API response
-        channel = ChannelStub(responses=[CustomException()])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = dataproc_v1beta2.WorkflowTemplateServiceClient()
-
-        # Setup request
-        parent = client.region_path("[PROJECT]", "[REGION]")
-        template = {}
-
-        with pytest.raises(CustomException):
-            client.create_workflow_template(parent, template)
-
-    def test_get_workflow_template(self):
-        # Setup Expected Response
-        id_ = "id3355"
-        name_2 = "name2-1052831874"
-        version = 351608024
-        expected_response = {"id": id_, "name": name_2, "version": version}
-        expected_response = workflow_templates_pb2.WorkflowTemplate(**expected_response)
-
-        # Mock the API response
-        channel = ChannelStub(responses=[expected_response])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = dataproc_v1beta2.WorkflowTemplateServiceClient()
-
-        # Setup Request
-        name = "name3373707"
-
-        response = client.get_workflow_template(name)
-        assert expected_response == response
-
-        assert len(channel.requests) == 1
-        expected_request = workflow_templates_pb2.GetWorkflowTemplateRequest(name=name)
-        actual_request = channel.requests[0][1]
-        assert expected_request == actual_request
-
-    def test_get_workflow_template_exception(self):
-        # Mock the API response
-        channel = ChannelStub(responses=[CustomException()])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = dataproc_v1beta2.WorkflowTemplateServiceClient()
-
-        # Setup request
-        name = "name3373707"
-
-        with pytest.raises(CustomException):
-            client.get_workflow_template(name)
-
-    def test_update_workflow_template(self):
-        # Setup Expected Response
-        id_ = "id3355"
-        name = "name3373707"
-        version = 351608024
-        expected_response = {"id": id_, "name": name, "version": version}
-        expected_response = workflow_templates_pb2.WorkflowTemplate(**expected_response)
-
-        # Mock the API response
-        channel = ChannelStub(responses=[expected_response])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = dataproc_v1beta2.WorkflowTemplateServiceClient()
-
-        # Setup Request
-        template = {}
-
-        response = client.update_workflow_template(template)
-        assert expected_response == response
-
-        assert len(channel.requests) == 1
-        expected_request = workflow_templates_pb2.UpdateWorkflowTemplateRequest(
-            template=template
-        )
-        actual_request = channel.requests[0][1]
-        assert expected_request == actual_request
-
-    def test_update_workflow_template_exception(self):
-        # Mock the API response
-        channel = ChannelStub(responses=[CustomException()])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = dataproc_v1beta2.WorkflowTemplateServiceClient()
-
-        # Setup request
-        template = {}
-
-        with pytest.raises(CustomException):
-            client.update_workflow_template(template)
-
-    def test_list_workflow_templates(self):
-        # Setup Expected Response
-        next_page_token = ""
-        templates_element = {}
-        templates = [templates_element]
-        expected_response = {"next_page_token": next_page_token, "templates": templates}
-        expected_response = workflow_templates_pb2.ListWorkflowTemplatesResponse(
-            **expected_response
-        )
-
-        # Mock the API response
-        channel = ChannelStub(responses=[expected_response])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = dataproc_v1beta2.WorkflowTemplateServiceClient()
-
-        # Setup Request
-        parent = client.region_path("[PROJECT]", "[REGION]")
-
-        paged_list_response = client.list_workflow_templates(parent)
-        resources = list(paged_list_response)
-        assert len(resources) == 1
-
-        assert expected_response.templates[0] == resources[0]
-
-        assert len(channel.requests) == 1
-        expected_request = workflow_templates_pb2.ListWorkflowTemplatesRequest(
-            parent=parent
-        )
-        actual_request = channel.requests[0][1]
-        assert expected_request == actual_request
-
-    def test_list_workflow_templates_exception(self):
-        channel = ChannelStub(responses=[CustomException()])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = dataproc_v1beta2.WorkflowTemplateServiceClient()
-
-        # Setup request
-        parent = client.region_path("[PROJECT]", "[REGION]")
-
-        paged_list_response = client.list_workflow_templates(parent)
-        with pytest.raises(CustomException):
-            list(paged_list_response)
-
-    def test_delete_workflow_template(self):
-        channel = ChannelStub()
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = dataproc_v1beta2.WorkflowTemplateServiceClient()
-
-        # Setup Request
-        name = "name3373707"
-
-        client.delete_workflow_template(name)
-
-        assert len(channel.requests) == 1
-        expected_request = workflow_templates_pb2.DeleteWorkflowTemplateRequest(
-            name=name
-        )
-        actual_request = channel.requests[0][1]
-        assert expected_request == actual_request
-
-    def test_delete_workflow_template_exception(self):
-        # Mock the API response
-        channel = ChannelStub(responses=[CustomException()])
-        patch = mock.patch("google.api_core.grpc_helpers.create_channel")
-        with patch as create_channel:
-            create_channel.return_value = channel
-            client = dataproc_v1beta2.WorkflowTemplateServiceClient()
-
-        # Setup request
-        name = "name3373707"
-
-        with pytest.raises(CustomException):
-            client.delete_workflow_template(name)

From 31af47932ebc6d0b5df0ba1a0f7a208ab55578ae Mon Sep 17 00:00:00 2001
From: arithmetic1728
Date: Mon, 10 Aug 2020 10:24:57 -0700
Subject: [PATCH 2/5] update

---
 UPGRADING.md                              | 16 ++++++++++++++++
 samples/snippets/create_cluster.py        |  2 +-
 samples/snippets/quickstart/quickstart.py |  6 +++---
 3 files changed, 20 insertions(+), 4 deletions(-)

diff --git a/UPGRADING.md b/UPGRADING.md
index 28d1f396..dd7c2f3c 100644
--- a/UPGRADING.md
+++ b/UPGRADING.md
@@ -149,4 +149,20 @@ from google.cloud import dataproc
 
 status = dataproc.ClusterStatus.State.CREATING
 cluster = dataproc.Cluster(cluster_name="name")
+```
+
+## Path Helper Methods
+The following path helper methods have been removed. Please construct the paths manually.
+
+```py
+project = 'my-project'
+location = 'project-location'
+region = 'project-region'
+workflow_template = 'template'
+autoscaling_policy = 'policy'
+
+location_path = f'projects/{project}/locations/{location}'
+region_path = f'projects/{project}/regions/{region}'
+workflow_template_path = f'projects/{project}/regions/{region}/workflowTemplates/{workflow_template}'
+autoscaling_policy_path = f'projects/{project}/locations/{location}/autoscalingPolicies/{autoscaling_policy}'
 ```
\ No newline at end of file
diff --git a/samples/snippets/create_cluster.py b/samples/snippets/create_cluster.py
index f5973083..732b349a 100644
--- a/samples/snippets/create_cluster.py
+++ b/samples/snippets/create_cluster.py
@@ -39,7 +39,7 @@ def create_cluster(project_id, region, cluster_name):
 
     # Create a client with the endpoint set to the desired cluster region.
     cluster_client = dataproc.ClusterControllerClient(
-        client_options={"api_endpoint": f"{region}-dataproc.googleapis.com:443",}
+        client_options={"api_endpoint": f"{region}-dataproc.googleapis.com:443"}
     )
 
     # Create the cluster config.
diff --git a/samples/snippets/quickstart/quickstart.py b/samples/snippets/quickstart/quickstart.py
index 362016c7..ba93993e 100644
--- a/samples/snippets/quickstart/quickstart.py
+++ b/samples/snippets/quickstart/quickstart.py
@@ -77,9 +77,9 @@ def quickstart(project_id, region, cluster_name, job_file_path):
 
     # Termimal states for a job.
     terminal_states = {
-        dataproc.JobStatus.ERROR,
-        dataproc.JobStatus.CANCELLED,
-        dataproc.JobStatus.DONE,
+        dataproc.JobStatus.State.ERROR,
+        dataproc.JobStatus.State.CANCELLED,
+        dataproc.JobStatus.State.DONE,
     }
 
     # Create a timeout such that the job gets cancelled if not in a

From a3111c7bdb190c4dc69c2a993740104bf4379de7 Mon Sep 17 00:00:00 2001
From: arithmetic1728
Date: Mon, 10 Aug 2020 10:36:59 -0700
Subject: [PATCH 3/5] update

---
 samples/snippets/noxfile.py | 26 ++++++++++++++------------
 synth.metadata              | 13 +++----------
 2 files changed, 17 insertions(+), 22 deletions(-)

diff --git a/samples/snippets/noxfile.py b/samples/snippets/noxfile.py
index 5660f08b..ba55d7ce 100644
--- a/samples/snippets/noxfile.py
+++ b/samples/snippets/noxfile.py
@@ -37,22 +37,24 @@
 
 TEST_CONFIG = {
     # You can opt out from the test for specific Python versions.
-    "ignored_versions": ["2.7"],
+    'ignored_versions': ["2.7"],
+
     # An envvar key for determining the project id to use. Change it
     # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
     # build specific Cloud project. You can also use your own string
     # to use your own Cloud project.
-    "gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
+    'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT',
     # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
+
     # A dictionary you want to inject into your test. Don't put any
     # secrets here. These values will override predefined values.
-    "envs": {},
+    'envs': {},
 }
 
 
 try:
     # Ensure we can import noxfile_config in the project's directory.
-    sys.path.append(".")
+    sys.path.append('.')
     from noxfile_config import TEST_CONFIG_OVERRIDE
 except ImportError as e:
     print("No user noxfile_config found: detail: {}".format(e))
@@ -67,12 +69,12 @@ def get_pytest_env_vars():
     ret = {}
 
     # Override the GCLOUD_PROJECT and the alias.
-    env_key = TEST_CONFIG["gcloud_project_env"]
+    env_key = TEST_CONFIG['gcloud_project_env']
     # This should error out if not set.
-    ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key]
+    ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key]
 
     # Apply user supplied envs.
-    ret.update(TEST_CONFIG["envs"])
+    ret.update(TEST_CONFIG['envs'])
     return ret
 
 
@@ -81,7 +83,7 @@ def get_pytest_env_vars():
 ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"]
 
 # Any default versions that should be ignored.
-IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
+IGNORED_VERSIONS = TEST_CONFIG['ignored_versions']
 
 TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS])
 
@@ -136,7 +138,7 @@ def lint(session):
     args = FLAKE8_COMMON_ARGS + [
         "--application-import-names",
         ",".join(local_names),
-        ".",
+        "."
     ]
     session.run("flake8", *args)
 
@@ -180,9 +182,9 @@ def py(session):
     if session.python in TESTED_VERSIONS:
         _session_tests(session)
     else:
-        session.skip(
-            "SKIPPED: {} tests are disabled for this sample.".format(session.python)
-        )
+        session.skip("SKIPPED: {} tests are disabled for this sample.".format(
+            session.python
+        ))
 
 
 #
diff --git a/synth.metadata b/synth.metadata
index 334a4418..d3160be7 100644
--- a/synth.metadata
+++ b/synth.metadata
@@ -4,28 +4,21 @@
       "git": {
         "name": ".",
         "remote": "https://github.com/googleapis/python-dataproc.git",
-        "sha": "65c277120e136edd5648047fcb85f8d0cd104408"
+        "sha": "31af47932ebc6d0b5df0ba1a0f7a208ab55578ae"
       }
     },
     {
       "git": {
         "name": "synthtool",
         "remote": "https://github.com/googleapis/synthtool.git",
-        "sha": "5f2f711c91199ba2f609d3f06a2fe22aee4e5be3"
+        "sha": "94421c47802f56a44c320257b2b4c190dc7d6b68"
       }
     },
     {
       "git": {
         "name": "synthtool",
         "remote": "https://github.com/googleapis/synthtool.git",
-        "sha": "5f2f711c91199ba2f609d3f06a2fe22aee4e5be3"
-      }
-    },
-    {
-      "git": {
-        "name": "synthtool",
-        "remote": "https://github.com/googleapis/synthtool.git",
-        "sha": "5f2f711c91199ba2f609d3f06a2fe22aee4e5be3"
+        "sha": "94421c47802f56a44c320257b2b4c190dc7d6b68"
       }
     }
   ],

From 73da90dd7cfd690ae9a4425d4eaee7c95d1f28c4 Mon Sep 17 00:00:00 2001
From: arithmetic1728
Date: Mon, 10 Aug 2020 14:51:09 -0700
Subject: [PATCH 4/5] update

---
 samples/snippets/quickstart/quickstart.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/samples/snippets/quickstart/quickstart.py b/samples/snippets/quickstart/quickstart.py
index ba93993e..bffe3a31 100644
--- a/samples/snippets/quickstart/quickstart.py
+++ b/samples/snippets/quickstart/quickstart.py
@@ -123,7 +123,7 @@ def quickstart(project_id, region, cluster_name, job_file_path):
 
     print(
         "Job {} finished with state {}:\n{}".format(
-            job_id, job_response.status.State.Name(job_response.status.state), output
+            job_id, job_response.status.State.name, output
         )
     )
 
From 60a88dfa6e285f8b0d8f3764d634f86a882c3ae8 Mon Sep 17 00:00:00 2001
From: arithmetic1728
Date: Mon, 10 Aug 2020 15:21:11 -0700
Subject: [PATCH 5/5] update

---
 samples/snippets/quickstart/quickstart.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/samples/snippets/quickstart/quickstart.py b/samples/snippets/quickstart/quickstart.py
index bffe3a31..68f0bdf4 100644
--- a/samples/snippets/quickstart/quickstart.py
+++ b/samples/snippets/quickstart/quickstart.py
@@ -123,7 +123,7 @@ def quickstart(project_id, region, cluster_name, job_file_path):
 
     print(
         "Job {} finished with state {}:\n{}".format(
-            job_id, job_response.status.State.name, output
+            job_id, job_response.status.state.name, output
         )
     )
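
For quick reference, the sketch below ties together the post-migration patterns touched by patches 2-5: resource paths built manually (replacing the removed path helpers), the regional `api_endpoint` client option from `create_cluster.py`, and the nested `JobStatus.State` enum with `status.state.name` access from `quickstart.py`. It is illustrative only and not part of the patch series; the project ID, region, and the `job_is_finished` helper are hypothetical placeholders.

```py
from google.cloud import dataproc

# Hypothetical identifiers -- substitute real values.
project = "my-project"
region = "us-central1"

# Path helpers were removed; build the parent path manually (see UPGRADING.md above).
region_path = f"projects/{project}/regions/{region}"

# Clients are pointed at a region through client_options, as in create_cluster.py.
cluster_client = dataproc.ClusterControllerClient(
    client_options={"api_endpoint": f"{region}-dataproc.googleapis.com:443"}
)

# Job states are now nested under JobStatus.State, as in quickstart.py.
terminal_states = {
    dataproc.JobStatus.State.ERROR,
    dataproc.JobStatus.State.CANCELLED,
    dataproc.JobStatus.State.DONE,
}


def job_is_finished(job):
    """Return True once a fetched Job message has reached a terminal state."""
    # Proto-plus enum fields expose .name directly on the field value.
    print(f"Job state: {job.status.state.name}")
    return job.status.state in terminal_states
```

The set-membership check mirrors the quickstart's polling loop, which cancels the job if it never reaches one of these terminal states within the timeout.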