From 2ea98bddbfafd5e728b99f8bcae6b7dc2a741e60 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 21 Jul 2021 15:54:10 +0000 Subject: [PATCH] feat: add a WorkerPools API (#129) BREAKING CHANGE: Proto had a prior definitions of WorkerPool resources which were never supported. This change replaces those resources with definitions that are currently supported. Committer: @mwiczer PiperOrigin-RevId: 385610622 Source-Link: https://github.com/googleapis/googleapis/commit/7a1fb837d3aab1e66562e177b1731280f641dd2a Source-Link: https://github.com/googleapis/googleapis-gen/commit/07289aaf2241f2a254f0a8d95885267e2649c220 --- google/cloud/devtools/cloudbuild/__init__.py | 18 +- .../cloud/devtools/cloudbuild_v1/__init__.py | 12 +- .../services/cloud_build/async_client.py | 371 +++++- .../services/cloud_build/client.py | 393 ++++++- .../services/cloud_build/pagers.py | 128 ++ .../services/cloud_build/transports/base.py | 6 +- .../services/cloud_build/transports/grpc.py | 39 +- .../cloud_build/transports/grpc_asyncio.py | 41 +- .../devtools/cloudbuild_v1/types/__init__.py | 12 +- .../cloudbuild_v1/types/cloudbuild.py | 407 ++++--- owlbot.py | 23 +- scripts/fixup_cloudbuild_v1_keywords.py | 8 +- .../gapic/cloudbuild_v1/test_cloud_build.py | 1039 +++++++++++++++-- 13 files changed, 2028 insertions(+), 469 deletions(-) diff --git a/google/cloud/devtools/cloudbuild/__init__.py b/google/cloud/devtools/cloudbuild/__init__.py index 261dafed..7d0a953d 100644 --- a/google/cloud/devtools/cloudbuild/__init__.py +++ b/google/cloud/devtools/cloudbuild/__init__.py @@ -34,10 +34,16 @@ from google.cloud.devtools.cloudbuild_v1.types.cloudbuild import ( CreateBuildTriggerRequest, ) +from google.cloud.devtools.cloudbuild_v1.types.cloudbuild import ( + CreateWorkerPoolOperationMetadata, +) from google.cloud.devtools.cloudbuild_v1.types.cloudbuild import CreateWorkerPoolRequest from google.cloud.devtools.cloudbuild_v1.types.cloudbuild import ( DeleteBuildTriggerRequest, ) +from google.cloud.devtools.cloudbuild_v1.types.cloudbuild import ( + DeleteWorkerPoolOperationMetadata, +) from google.cloud.devtools.cloudbuild_v1.types.cloudbuild import DeleteWorkerPoolRequest from google.cloud.devtools.cloudbuild_v1.types.cloudbuild import FileHashes from google.cloud.devtools.cloudbuild_v1.types.cloudbuild import GetBuildRequest @@ -56,7 +62,7 @@ ) from google.cloud.devtools.cloudbuild_v1.types.cloudbuild import ListWorkerPoolsRequest from google.cloud.devtools.cloudbuild_v1.types.cloudbuild import ListWorkerPoolsResponse -from google.cloud.devtools.cloudbuild_v1.types.cloudbuild import Network +from google.cloud.devtools.cloudbuild_v1.types.cloudbuild import PrivatePoolV1Config from google.cloud.devtools.cloudbuild_v1.types.cloudbuild import PubsubConfig from google.cloud.devtools.cloudbuild_v1.types.cloudbuild import PullRequestFilter from google.cloud.devtools.cloudbuild_v1.types.cloudbuild import PushFilter @@ -81,10 +87,12 @@ from google.cloud.devtools.cloudbuild_v1.types.cloudbuild import ( UpdateBuildTriggerRequest, ) +from google.cloud.devtools.cloudbuild_v1.types.cloudbuild import ( + UpdateWorkerPoolOperationMetadata, +) from google.cloud.devtools.cloudbuild_v1.types.cloudbuild import UpdateWorkerPoolRequest from google.cloud.devtools.cloudbuild_v1.types.cloudbuild import Volume from google.cloud.devtools.cloudbuild_v1.types.cloudbuild import WebhookConfig -from google.cloud.devtools.cloudbuild_v1.types.cloudbuild import WorkerConfig from 
google.cloud.devtools.cloudbuild_v1.types.cloudbuild import WorkerPool __all__ = ( @@ -101,8 +109,10 @@ "CancelBuildRequest", "CreateBuildRequest", "CreateBuildTriggerRequest", + "CreateWorkerPoolOperationMetadata", "CreateWorkerPoolRequest", "DeleteBuildTriggerRequest", + "DeleteWorkerPoolOperationMetadata", "DeleteWorkerPoolRequest", "FileHashes", "GetBuildRequest", @@ -117,7 +127,7 @@ "ListBuildTriggersResponse", "ListWorkerPoolsRequest", "ListWorkerPoolsResponse", - "Network", + "PrivatePoolV1Config", "PubsubConfig", "PullRequestFilter", "PushFilter", @@ -136,9 +146,9 @@ "StorageSourceManifest", "TimeSpan", "UpdateBuildTriggerRequest", + "UpdateWorkerPoolOperationMetadata", "UpdateWorkerPoolRequest", "Volume", "WebhookConfig", - "WorkerConfig", "WorkerPool", ) diff --git a/google/cloud/devtools/cloudbuild_v1/__init__.py b/google/cloud/devtools/cloudbuild_v1/__init__.py index 15505413..a8b8a76c 100644 --- a/google/cloud/devtools/cloudbuild_v1/__init__.py +++ b/google/cloud/devtools/cloudbuild_v1/__init__.py @@ -28,8 +28,10 @@ from .types.cloudbuild import CancelBuildRequest from .types.cloudbuild import CreateBuildRequest from .types.cloudbuild import CreateBuildTriggerRequest +from .types.cloudbuild import CreateWorkerPoolOperationMetadata from .types.cloudbuild import CreateWorkerPoolRequest from .types.cloudbuild import DeleteBuildTriggerRequest +from .types.cloudbuild import DeleteWorkerPoolOperationMetadata from .types.cloudbuild import DeleteWorkerPoolRequest from .types.cloudbuild import FileHashes from .types.cloudbuild import GetBuildRequest @@ -44,7 +46,7 @@ from .types.cloudbuild import ListBuildTriggersResponse from .types.cloudbuild import ListWorkerPoolsRequest from .types.cloudbuild import ListWorkerPoolsResponse -from .types.cloudbuild import Network +from .types.cloudbuild import PrivatePoolV1Config from .types.cloudbuild import PubsubConfig from .types.cloudbuild import PullRequestFilter from .types.cloudbuild import PushFilter @@ -63,10 +65,10 @@ from .types.cloudbuild import StorageSourceManifest from .types.cloudbuild import TimeSpan from .types.cloudbuild import UpdateBuildTriggerRequest +from .types.cloudbuild import UpdateWorkerPoolOperationMetadata from .types.cloudbuild import UpdateWorkerPoolRequest from .types.cloudbuild import Volume from .types.cloudbuild import WebhookConfig -from .types.cloudbuild import WorkerConfig from .types.cloudbuild import WorkerPool __all__ = ( @@ -83,8 +85,10 @@ "CloudBuildClient", "CreateBuildRequest", "CreateBuildTriggerRequest", + "CreateWorkerPoolOperationMetadata", "CreateWorkerPoolRequest", "DeleteBuildTriggerRequest", + "DeleteWorkerPoolOperationMetadata", "DeleteWorkerPoolRequest", "FileHashes", "GetBuildRequest", @@ -99,7 +103,7 @@ "ListBuildsResponse", "ListWorkerPoolsRequest", "ListWorkerPoolsResponse", - "Network", + "PrivatePoolV1Config", "PubsubConfig", "PullRequestFilter", "PushFilter", @@ -118,9 +122,9 @@ "StorageSourceManifest", "TimeSpan", "UpdateBuildTriggerRequest", + "UpdateWorkerPoolOperationMetadata", "UpdateWorkerPoolRequest", "Volume", "WebhookConfig", - "WorkerConfig", "WorkerPool", ) diff --git a/google/cloud/devtools/cloudbuild_v1/services/cloud_build/async_client.py b/google/cloud/devtools/cloudbuild_v1/services/cloud_build/async_client.py index 36b0532d..b03edb19 100644 --- a/google/cloud/devtools/cloudbuild_v1/services/cloud_build/async_client.py +++ b/google/cloud/devtools/cloudbuild_v1/services/cloud_build/async_client.py @@ -31,6 +31,8 @@ from 
google.cloud.devtools.cloudbuild_v1.services.cloud_build import pagers from google.cloud.devtools.cloudbuild_v1.types import cloudbuild from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore from .transports.base import CloudBuildTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import CloudBuildGrpcAsyncIOTransport @@ -59,6 +61,8 @@ class CloudBuildAsyncClient: parse_build_trigger_path = staticmethod(CloudBuildClient.parse_build_trigger_path) crypto_key_path = staticmethod(CloudBuildClient.crypto_key_path) parse_crypto_key_path = staticmethod(CloudBuildClient.parse_crypto_key_path) + network_path = staticmethod(CloudBuildClient.network_path) + parse_network_path = staticmethod(CloudBuildClient.parse_network_path) secret_version_path = staticmethod(CloudBuildClient.secret_version_path) parse_secret_version_path = staticmethod(CloudBuildClient.parse_secret_version_path) service_account_path = staticmethod(CloudBuildClient.service_account_path) @@ -69,6 +73,8 @@ class CloudBuildAsyncClient: parse_subscription_path = staticmethod(CloudBuildClient.parse_subscription_path) topic_path = staticmethod(CloudBuildClient.topic_path) parse_topic_path = staticmethod(CloudBuildClient.parse_topic_path) + worker_pool_path = staticmethod(CloudBuildClient.worker_pool_path) + parse_worker_pool_path = staticmethod(CloudBuildClient.parse_worker_pool_path) common_billing_account_path = staticmethod( CloudBuildClient.common_billing_account_path ) @@ -1281,19 +1287,43 @@ async def create_worker_pool( self, request: cloudbuild.CreateWorkerPoolRequest = None, *, + parent: str = None, + worker_pool: cloudbuild.WorkerPool = None, + worker_pool_id: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> cloudbuild.WorkerPool: - r"""Creates a ``WorkerPool`` to run the builds, and returns the new - worker pool. - - This API is experimental. + ) -> operation_async.AsyncOperation: + r"""Creates a ``WorkerPool``. Args: request (:class:`google.cloud.devtools.cloudbuild_v1.types.CreateWorkerPoolRequest`): The request object. Request to create a new `WorkerPool`. + parent (:class:`str`): + Required. The parent resource where this worker pool + will be created. Format: + ``projects/{project}/locations/{location}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + worker_pool (:class:`google.cloud.devtools.cloudbuild_v1.types.WorkerPool`): + Required. ``WorkerPool`` resource to create. + This corresponds to the ``worker_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + worker_pool_id (:class:`str`): + Required. Immutable. The ID to use for the + ``WorkerPool``, which will become the final component of + the resource name. + + This value should be 1-63 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``worker_pool_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1301,24 +1331,49 @@ async def create_worker_pool( sent along with the request as metadata. 
Returns: - google.cloud.devtools.cloudbuild_v1.types.WorkerPool: - Configuration for a WorkerPool to run - the builds. - Workers are machines that Cloud Build - uses to run your builds. By default, all - workers run in a project owned by Cloud - Build. To have full control over the - workers that execute your builds -- such - as enabling them to access private - resources on your private network -- you - can request Cloud Build to run the - workers in your own project by creating - a custom workers pool. + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.devtools.cloudbuild_v1.types.WorkerPool` + Configuration for a WorkerPool. + + Cloud Build owns and maintains a pool of workers for + general use and have no access to a project's private + network. By default, builds submitted to Cloud Build + will use a worker from this pool. + + If your build needs access to resources on a private + network, create and use a WorkerPool to run your + builds. Private WorkerPools give your builds access + to any single VPC network that you administer, + including any on-prem resources connected to that VPC + network. For an overview of private pools, see + [Private pools + overview](\ https://cloud.google.com/build/docs/private-pools/private-pools-overview). """ # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, worker_pool, worker_pool_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + request = cloudbuild.CreateWorkerPoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if worker_pool is not None: + request.worker_pool = worker_pool + if worker_pool_id is not None: + request.worker_pool_id = worker_pool_id + # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( @@ -1327,9 +1382,23 @@ async def create_worker_pool( client_info=DEFAULT_CLIENT_INFO, ) + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + cloudbuild.WorkerPool, + metadata_type=cloudbuild.CreateWorkerPoolOperationMetadata, + ) + # Done; return the response. return response @@ -1337,18 +1406,25 @@ async def get_worker_pool( self, request: cloudbuild.GetWorkerPoolRequest = None, *, + name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> cloudbuild.WorkerPool: - r"""Returns information about a ``WorkerPool``. - - This API is experimental. + r"""Returns details of a ``WorkerPool``. Args: request (:class:`google.cloud.devtools.cloudbuild_v1.types.GetWorkerPoolRequest`): The request object. Request to get a `WorkerPool` with the specified name. + name (:class:`str`): + Required. 
The name of the ``WorkerPool`` to retrieve. + Format: + ``projects/{project}/locations/{location}/workerPools/{workerPool}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1357,23 +1433,40 @@ async def get_worker_pool( Returns: google.cloud.devtools.cloudbuild_v1.types.WorkerPool: - Configuration for a WorkerPool to run - the builds. - Workers are machines that Cloud Build - uses to run your builds. By default, all - workers run in a project owned by Cloud - Build. To have full control over the - workers that execute your builds -- such - as enabling them to access private - resources on your private network -- you - can request Cloud Build to run the - workers in your own project by creating - a custom workers pool. + Configuration for a WorkerPool. + + Cloud Build owns and maintains a pool of workers for + general use and have no access to a project's private + network. By default, builds submitted to Cloud Build + will use a worker from this pool. + + If your build needs access to resources on a private + network, create and use a WorkerPool to run your + builds. Private WorkerPools give your builds access + to any single VPC network that you administer, + including any on-prem resources connected to that VPC + network. For an overview of private pools, see + [Private pools + overview](\ https://cloud.google.com/build/docs/private-pools/private-pools-overview). """ # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + request = cloudbuild.GetWorkerPoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( @@ -1392,6 +1485,12 @@ async def get_worker_pool( client_info=DEFAULT_CLIENT_INFO, ) + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) @@ -1402,26 +1501,66 @@ async def delete_worker_pool( self, request: cloudbuild.DeleteWorkerPoolRequest = None, *, + name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes a ``WorkerPool`` by its project ID and WorkerPool name. - - This API is experimental. + ) -> operation_async.AsyncOperation: + r"""Deletes a ``WorkerPool``. Args: request (:class:`google.cloud.devtools.cloudbuild_v1.types.DeleteWorkerPoolRequest`): The request object. Request to delete a `WorkerPool`. + name (:class:`str`): + Required. The name of the ``WorkerPool`` to delete. + Format: + ``projects/{project}/locations/{workerPool}/workerPools/{workerPool}``. 
+ + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + """ # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + request = cloudbuild.DeleteWorkerPoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( @@ -1430,26 +1569,58 @@ async def delete_worker_pool( client_info=DEFAULT_CLIENT_INFO, ) + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + # Send the request. - await rpc( - request, retry=retry, timeout=timeout, metadata=metadata, + response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=cloudbuild.DeleteWorkerPoolOperationMetadata, ) + # Done; return the response. + return response + async def update_worker_pool( self, request: cloudbuild.UpdateWorkerPoolRequest = None, *, + worker_pool: cloudbuild.WorkerPool = None, + update_mask: field_mask_pb2.FieldMask = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> cloudbuild.WorkerPool: - r"""Update a ``WorkerPool``. - - This API is experimental. + ) -> operation_async.AsyncOperation: + r"""Updates a ``WorkerPool``. Args: request (:class:`google.cloud.devtools.cloudbuild_v1.types.UpdateWorkerPoolRequest`): The request object. Request to update a `WorkerPool`. + worker_pool (:class:`google.cloud.devtools.cloudbuild_v1.types.WorkerPool`): + Required. The ``WorkerPool`` to update. + + The ``name`` field is used to identify the + ``WorkerPool`` to update. Format: + ``projects/{project}/locations/{location}/workerPools/{workerPool}``. + + This corresponds to the ``worker_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + A mask specifying which fields in ``worker_pool`` to + update. 
+ + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1457,24 +1628,47 @@ async def update_worker_pool( sent along with the request as metadata. Returns: - google.cloud.devtools.cloudbuild_v1.types.WorkerPool: - Configuration for a WorkerPool to run - the builds. - Workers are machines that Cloud Build - uses to run your builds. By default, all - workers run in a project owned by Cloud - Build. To have full control over the - workers that execute your builds -- such - as enabling them to access private - resources on your private network -- you - can request Cloud Build to run the - workers in your own project by creating - a custom workers pool. + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.devtools.cloudbuild_v1.types.WorkerPool` + Configuration for a WorkerPool. + + Cloud Build owns and maintains a pool of workers for + general use and have no access to a project's private + network. By default, builds submitted to Cloud Build + will use a worker from this pool. + + If your build needs access to resources on a private + network, create and use a WorkerPool to run your + builds. Private WorkerPools give your builds access + to any single VPC network that you administer, + including any on-prem resources connected to that VPC + network. For an overview of private pools, see + [Private pools + overview](\ https://cloud.google.com/build/docs/private-pools/private-pools-overview). """ # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([worker_pool, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + request = cloudbuild.UpdateWorkerPoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if worker_pool is not None: + request.worker_pool = worker_pool + if update_mask is not None: + request.update_mask = update_mask + # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( @@ -1483,9 +1677,25 @@ async def update_worker_pool( client_info=DEFAULT_CLIENT_INFO, ) + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("worker_pool.name", request.worker_pool.name),) + ), + ) + # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + cloudbuild.WorkerPool, + metadata_type=cloudbuild.UpdateWorkerPoolOperationMetadata, + ) + # Done; return the response. 
return response @@ -1493,17 +1703,24 @@ async def list_worker_pools( self, request: cloudbuild.ListWorkerPoolsRequest = None, *, + parent: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> cloudbuild.ListWorkerPoolsResponse: - r"""List project's ``WorkerPools``. - - This API is experimental. + ) -> pagers.ListWorkerPoolsAsyncPager: + r"""Lists ``WorkerPool``\ s. Args: request (:class:`google.cloud.devtools.cloudbuild_v1.types.ListWorkerPoolsRequest`): - The request object. Request to list `WorkerPools`. + The request object. Request to list `WorkerPool`\s. + parent (:class:`str`): + Required. The parent of the collection of + ``WorkerPools``. Format: + ``projects/{project}/locations/{location}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1511,12 +1728,30 @@ async def list_worker_pools( sent along with the request as metadata. Returns: - google.cloud.devtools.cloudbuild_v1.types.ListWorkerPoolsResponse: + google.cloud.devtools.cloudbuild_v1.services.cloud_build.pagers.ListWorkerPoolsAsyncPager: Response containing existing WorkerPools. + + Iterating over this object will yield results and + resolve additional pages automatically. + """ # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + request = cloudbuild.ListWorkerPoolsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( @@ -1535,9 +1770,21 @@ async def list_worker_pools( client_info=DEFAULT_CLIENT_INFO, ) + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListWorkerPoolsAsyncPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + # Done; return the response. 
return response diff --git a/google/cloud/devtools/cloudbuild_v1/services/cloud_build/client.py b/google/cloud/devtools/cloudbuild_v1/services/cloud_build/client.py index 6f3a91a3..7498b7a1 100644 --- a/google/cloud/devtools/cloudbuild_v1/services/cloud_build/client.py +++ b/google/cloud/devtools/cloudbuild_v1/services/cloud_build/client.py @@ -35,6 +35,8 @@ from google.cloud.devtools.cloudbuild_v1.services.cloud_build import pagers from google.cloud.devtools.cloudbuild_v1.types import cloudbuild from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore from .transports.base import CloudBuildTransport, DEFAULT_CLIENT_INFO from .transports.grpc import CloudBuildGrpcTransport @@ -205,6 +207,21 @@ def parse_crypto_key_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def network_path(project: str, network: str,) -> str: + """Returns a fully-qualified network string.""" + return "projects/{project}/global/networks/{network}".format( + project=project, network=network, + ) + + @staticmethod + def parse_network_path(path: str) -> Dict[str, str]: + """Parses a network path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/global/networks/(?P<network>.+?)$", path + ) + return m.groupdict() if m else {} + @staticmethod def secret_version_path(project: str, secret: str, version: str,) -> str: """Returns a fully-qualified secret_version string.""" @@ -263,6 +280,22 @@ def parse_topic_path(path: str) -> Dict[str, str]: m = re.match(r"^projects/(?P<project>.+?)/topics/(?P<topic>.+?)$", path) return m.groupdict() if m else {} + @staticmethod + def worker_pool_path(project: str, location: str, worker_pool: str,) -> str: + """Returns a fully-qualified worker_pool string.""" + return "projects/{project}/locations/{location}/workerPools/{worker_pool}".format( + project=project, location=location, worker_pool=worker_pool, + ) + + @staticmethod + def parse_worker_pool_path(path: str) -> Dict[str, str]: + """Parses a worker_pool path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/workerPools/(?P<worker_pool>.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def common_billing_account_path(billing_account: str,) -> str: """Returns a fully-qualified billing_account string.""" @@ -1489,19 +1522,43 @@ def create_worker_pool( self, request: cloudbuild.CreateWorkerPoolRequest = None, *, + parent: str = None, + worker_pool: cloudbuild.WorkerPool = None, + worker_pool_id: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> cloudbuild.WorkerPool: - r"""Creates a ``WorkerPool`` to run the builds, and returns the new - worker pool. - - This API is experimental. + ) -> operation.Operation: + r"""Creates a ``WorkerPool``. Args: request (google.cloud.devtools.cloudbuild_v1.types.CreateWorkerPoolRequest): The request object. Request to create a new `WorkerPool`. + parent (str): + Required. The parent resource where this worker pool + will be created. Format: + ``projects/{project}/locations/{location}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + worker_pool (google.cloud.devtools.cloudbuild_v1.types.WorkerPool): + Required. ``WorkerPool`` resource to create.
+ This corresponds to the ``worker_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + worker_pool_id (str): + Required. Immutable. The ID to use for the + ``WorkerPool``, which will become the final component of + the resource name. + + This value should be 1-63 characters, and valid + characters are /[a-z][0-9]-/. + + This corresponds to the ``worker_pool_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1509,36 +1566,74 @@ def create_worker_pool( sent along with the request as metadata. Returns: - google.cloud.devtools.cloudbuild_v1.types.WorkerPool: - Configuration for a WorkerPool to run - the builds. - Workers are machines that Cloud Build - uses to run your builds. By default, all - workers run in a project owned by Cloud - Build. To have full control over the - workers that execute your builds -- such - as enabling them to access private - resources on your private network -- you - can request Cloud Build to run the - workers in your own project by creating - a custom workers pool. + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.devtools.cloudbuild_v1.types.WorkerPool` + Configuration for a WorkerPool. + + Cloud Build owns and maintains a pool of workers for + general use and have no access to a project's private + network. By default, builds submitted to Cloud Build + will use a worker from this pool. + + If your build needs access to resources on a private + network, create and use a WorkerPool to run your + builds. Private WorkerPools give your builds access + to any single VPC network that you administer, + including any on-prem resources connected to that VPC + network. For an overview of private pools, see + [Private pools + overview](\ https://cloud.google.com/build/docs/private-pools/private-pools-overview). """ # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, worker_pool, worker_pool_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + # Minor optimization to avoid making a copy if the user passes # in a cloudbuild.CreateWorkerPoolRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, cloudbuild.CreateWorkerPoolRequest): request = cloudbuild.CreateWorkerPoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if worker_pool is not None: + request.worker_pool = worker_pool + if worker_pool_id is not None: + request.worker_pool_id = worker_pool_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.create_worker_pool] + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + # Send the request. 
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + cloudbuild.WorkerPool, + metadata_type=cloudbuild.CreateWorkerPoolOperationMetadata, + ) + # Done; return the response. return response @@ -1546,18 +1641,25 @@ def get_worker_pool( self, request: cloudbuild.GetWorkerPoolRequest = None, *, + name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> cloudbuild.WorkerPool: - r"""Returns information about a ``WorkerPool``. - - This API is experimental. + r"""Returns details of a ``WorkerPool``. Args: request (google.cloud.devtools.cloudbuild_v1.types.GetWorkerPoolRequest): The request object. Request to get a `WorkerPool` with the specified name. + name (str): + Required. The name of the ``WorkerPool`` to retrieve. + Format: + ``projects/{project}/locations/{location}/workerPools/{workerPool}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1566,32 +1668,54 @@ def get_worker_pool( Returns: google.cloud.devtools.cloudbuild_v1.types.WorkerPool: - Configuration for a WorkerPool to run - the builds. - Workers are machines that Cloud Build - uses to run your builds. By default, all - workers run in a project owned by Cloud - Build. To have full control over the - workers that execute your builds -- such - as enabling them to access private - resources on your private network -- you - can request Cloud Build to run the - workers in your own project by creating - a custom workers pool. + Configuration for a WorkerPool. + + Cloud Build owns and maintains a pool of workers for + general use and have no access to a project's private + network. By default, builds submitted to Cloud Build + will use a worker from this pool. + + If your build needs access to resources on a private + network, create and use a WorkerPool to run your + builds. Private WorkerPools give your builds access + to any single VPC network that you administer, + including any on-prem resources connected to that VPC + network. For an overview of private pools, see + [Private pools + overview](\ https://cloud.google.com/build/docs/private-pools/private-pools-overview). """ # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + # Minor optimization to avoid making a copy if the user passes # in a cloudbuild.GetWorkerPoolRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, cloudbuild.GetWorkerPoolRequest): request = cloudbuild.GetWorkerPoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
rpc = self._transport._wrapped_methods[self._transport.get_worker_pool] + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) @@ -1602,55 +1726,126 @@ def delete_worker_pool( self, request: cloudbuild.DeleteWorkerPoolRequest = None, *, + name: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes a ``WorkerPool`` by its project ID and WorkerPool name. - - This API is experimental. + ) -> operation.Operation: + r"""Deletes a ``WorkerPool``. Args: request (google.cloud.devtools.cloudbuild_v1.types.DeleteWorkerPoolRequest): The request object. Request to delete a `WorkerPool`. + name (str): + Required. The name of the ``WorkerPool`` to delete. + Format: + ``projects/{project}/locations/{workerPool}/workerPools/{workerPool}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + The JSON representation for Empty is empty JSON + object {}. + """ # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + # Minor optimization to avoid making a copy if the user passes # in a cloudbuild.DeleteWorkerPoolRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, cloudbuild.DeleteWorkerPoolRequest): request = cloudbuild.DeleteWorkerPoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.delete_worker_pool] + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + # Send the request. - rpc( - request, retry=retry, timeout=timeout, metadata=metadata, + response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + + # Wrap the response in an operation future. 
+ response = operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=cloudbuild.DeleteWorkerPoolOperationMetadata, ) + # Done; return the response. + return response + def update_worker_pool( self, request: cloudbuild.UpdateWorkerPoolRequest = None, *, + worker_pool: cloudbuild.WorkerPool = None, + update_mask: field_mask_pb2.FieldMask = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> cloudbuild.WorkerPool: - r"""Update a ``WorkerPool``. - - This API is experimental. + ) -> operation.Operation: + r"""Updates a ``WorkerPool``. Args: request (google.cloud.devtools.cloudbuild_v1.types.UpdateWorkerPoolRequest): The request object. Request to update a `WorkerPool`. + worker_pool (google.cloud.devtools.cloudbuild_v1.types.WorkerPool): + Required. The ``WorkerPool`` to update. + + The ``name`` field is used to identify the + ``WorkerPool`` to update. Format: + ``projects/{project}/locations/{location}/workerPools/{workerPool}``. + + This corresponds to the ``worker_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + A mask specifying which fields in ``worker_pool`` to + update. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1658,36 +1853,74 @@ def update_worker_pool( sent along with the request as metadata. Returns: - google.cloud.devtools.cloudbuild_v1.types.WorkerPool: - Configuration for a WorkerPool to run - the builds. - Workers are machines that Cloud Build - uses to run your builds. By default, all - workers run in a project owned by Cloud - Build. To have full control over the - workers that execute your builds -- such - as enabling them to access private - resources on your private network -- you - can request Cloud Build to run the - workers in your own project by creating - a custom workers pool. + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.devtools.cloudbuild_v1.types.WorkerPool` + Configuration for a WorkerPool. + + Cloud Build owns and maintains a pool of workers for + general use and have no access to a project's private + network. By default, builds submitted to Cloud Build + will use a worker from this pool. + + If your build needs access to resources on a private + network, create and use a WorkerPool to run your + builds. Private WorkerPools give your builds access + to any single VPC network that you administer, + including any on-prem resources connected to that VPC + network. For an overview of private pools, see + [Private pools + overview](\ https://cloud.google.com/build/docs/private-pools/private-pools-overview). """ # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([worker_pool, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + # Minor optimization to avoid making a copy if the user passes # in a cloudbuild.UpdateWorkerPoolRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, cloudbuild.UpdateWorkerPoolRequest): request = cloudbuild.UpdateWorkerPoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if worker_pool is not None: + request.worker_pool = worker_pool + if update_mask is not None: + request.update_mask = update_mask # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.update_worker_pool] + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("worker_pool.name", request.worker_pool.name),) + ), + ) + # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + cloudbuild.WorkerPool, + metadata_type=cloudbuild.UpdateWorkerPoolOperationMetadata, + ) + # Done; return the response. return response @@ -1695,17 +1928,24 @@ def list_worker_pools( self, request: cloudbuild.ListWorkerPoolsRequest = None, *, + parent: str = None, retry: retries.Retry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), - ) -> cloudbuild.ListWorkerPoolsResponse: - r"""List project's ``WorkerPools``. - - This API is experimental. + ) -> pagers.ListWorkerPoolsPager: + r"""Lists ``WorkerPool``\ s. Args: request (google.cloud.devtools.cloudbuild_v1.types.ListWorkerPoolsRequest): - The request object. Request to list `WorkerPools`. + The request object. Request to list `WorkerPool`\s. + parent (str): + Required. The parent of the collection of + ``WorkerPools``. Format: + ``projects/{project}/locations/{location}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1713,24 +1953,53 @@ def list_worker_pools( sent along with the request as metadata. Returns: - google.cloud.devtools.cloudbuild_v1.types.ListWorkerPoolsResponse: + google.cloud.devtools.cloudbuild_v1.services.cloud_build.pagers.ListWorkerPoolsPager: Response containing existing WorkerPools. + + Iterating over this object will yield results and + resolve additional pages automatically. + """ # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + # Minor optimization to avoid making a copy if the user passes # in a cloudbuild.ListWorkerPoolsRequest. # There's no risk of modifying the input as we've already verified # there are no flattened fields. if not isinstance(request, cloudbuild.ListWorkerPoolsRequest): request = cloudbuild.ListWorkerPoolsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = self._transport._wrapped_methods[self._transport.list_worker_pools] + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + # Send the request. response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,) + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListWorkerPoolsPager( + method=rpc, request=request, response=response, metadata=metadata, + ) + # Done; return the response. return response diff --git a/google/cloud/devtools/cloudbuild_v1/services/cloud_build/pagers.py b/google/cloud/devtools/cloudbuild_v1/services/cloud_build/pagers.py index 47cca6c1..e9e8b276 100644 --- a/google/cloud/devtools/cloudbuild_v1/services/cloud_build/pagers.py +++ b/google/cloud/devtools/cloudbuild_v1/services/cloud_build/pagers.py @@ -281,3 +281,131 @@ async def async_generator(): def __repr__(self) -> str: return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListWorkerPoolsPager: + """A pager for iterating through ``list_worker_pools`` requests. + + This class thinly wraps an initial + :class:`google.cloud.devtools.cloudbuild_v1.types.ListWorkerPoolsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``worker_pools`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListWorkerPools`` requests and continue to iterate + through the ``worker_pools`` field on the + corresponding responses. + + All the usual :class:`google.cloud.devtools.cloudbuild_v1.types.ListWorkerPoolsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., cloudbuild.ListWorkerPoolsResponse], + request: cloudbuild.ListWorkerPoolsRequest, + response: cloudbuild.ListWorkerPoolsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.devtools.cloudbuild_v1.types.ListWorkerPoolsRequest): + The initial request object. + response (google.cloud.devtools.cloudbuild_v1.types.ListWorkerPoolsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = cloudbuild.ListWorkerPoolsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[cloudbuild.ListWorkerPoolsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[cloudbuild.WorkerPool]: + for page in self.pages: + yield from page.worker_pools + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListWorkerPoolsAsyncPager: + """A pager for iterating through ``list_worker_pools`` requests. + + This class thinly wraps an initial + :class:`google.cloud.devtools.cloudbuild_v1.types.ListWorkerPoolsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``worker_pools`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListWorkerPools`` requests and continue to iterate + through the ``worker_pools`` field on the + corresponding responses. + + All the usual :class:`google.cloud.devtools.cloudbuild_v1.types.ListWorkerPoolsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[cloudbuild.ListWorkerPoolsResponse]], + request: cloudbuild.ListWorkerPoolsRequest, + response: cloudbuild.ListWorkerPoolsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.devtools.cloudbuild_v1.types.ListWorkerPoolsRequest): + The initial request object. + response (google.cloud.devtools.cloudbuild_v1.types.ListWorkerPoolsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = cloudbuild.ListWorkerPoolsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[cloudbuild.ListWorkerPoolsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[cloudbuild.WorkerPool]: + async def async_generator(): + async for page in self.pages: + for response in page.worker_pools: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/devtools/cloudbuild_v1/services/cloud_build/transports/base.py b/google/cloud/devtools/cloudbuild_v1/services/cloud_build/transports/base.py index 4fe2d519..e33d07d3 100644 --- a/google/cloud/devtools/cloudbuild_v1/services/cloud_build/transports/base.py +++ b/google/cloud/devtools/cloudbuild_v1/services/cloud_build/transports/base.py @@ -424,7 +424,7 @@ def create_worker_pool( self, ) -> Callable[ [cloudbuild.CreateWorkerPoolRequest], - Union[cloudbuild.WorkerPool, Awaitable[cloudbuild.WorkerPool]], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], ]: raise NotImplementedError() @@ -442,7 +442,7 @@ def delete_worker_pool( self, ) -> Callable[ [cloudbuild.DeleteWorkerPoolRequest], - Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], ]: raise NotImplementedError() @@ -451,7 +451,7 @@ def update_worker_pool( self, ) -> Callable[ [cloudbuild.UpdateWorkerPoolRequest], - Union[cloudbuild.WorkerPool, Awaitable[cloudbuild.WorkerPool]], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], ]: raise NotImplementedError() diff --git a/google/cloud/devtools/cloudbuild_v1/services/cloud_build/transports/grpc.py b/google/cloud/devtools/cloudbuild_v1/services/cloud_build/transports/grpc.py index ed0fbf11..d5badf8c 100644 --- a/google/cloud/devtools/cloudbuild_v1/services/cloud_build/transports/grpc.py +++ b/google/cloud/devtools/cloudbuild_v1/services/cloud_build/transports/grpc.py @@ -618,17 +618,14 @@ def receive_trigger_webhook( @property def create_worker_pool( self, - ) -> Callable[[cloudbuild.CreateWorkerPoolRequest], cloudbuild.WorkerPool]: + ) -> Callable[[cloudbuild.CreateWorkerPoolRequest], operations_pb2.Operation]: r"""Return a callable for the create worker pool method over gRPC. - Creates a ``WorkerPool`` to run the builds, and returns the new - worker pool. - - This API is experimental. + Creates a ``WorkerPool``. Returns: Callable[[~.CreateWorkerPoolRequest], - ~.WorkerPool]: + ~.Operation]: A function that, when called, will call the underlying RPC on the server. 
""" @@ -640,7 +637,7 @@ def create_worker_pool( self._stubs["create_worker_pool"] = self.grpc_channel.unary_unary( "/google.devtools.cloudbuild.v1.CloudBuild/CreateWorkerPool", request_serializer=cloudbuild.CreateWorkerPoolRequest.serialize, - response_deserializer=cloudbuild.WorkerPool.deserialize, + response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["create_worker_pool"] @@ -650,9 +647,7 @@ def get_worker_pool( ) -> Callable[[cloudbuild.GetWorkerPoolRequest], cloudbuild.WorkerPool]: r"""Return a callable for the get worker pool method over gRPC. - Returns information about a ``WorkerPool``. - - This API is experimental. + Returns details of a ``WorkerPool``. Returns: Callable[[~.GetWorkerPoolRequest], @@ -675,16 +670,14 @@ def get_worker_pool( @property def delete_worker_pool( self, - ) -> Callable[[cloudbuild.DeleteWorkerPoolRequest], empty_pb2.Empty]: + ) -> Callable[[cloudbuild.DeleteWorkerPoolRequest], operations_pb2.Operation]: r"""Return a callable for the delete worker pool method over gRPC. - Deletes a ``WorkerPool`` by its project ID and WorkerPool name. - - This API is experimental. + Deletes a ``WorkerPool``. Returns: Callable[[~.DeleteWorkerPoolRequest], - ~.Empty]: + ~.Operation]: A function that, when called, will call the underlying RPC on the server. """ @@ -696,23 +689,21 @@ def delete_worker_pool( self._stubs["delete_worker_pool"] = self.grpc_channel.unary_unary( "/google.devtools.cloudbuild.v1.CloudBuild/DeleteWorkerPool", request_serializer=cloudbuild.DeleteWorkerPoolRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, + response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["delete_worker_pool"] @property def update_worker_pool( self, - ) -> Callable[[cloudbuild.UpdateWorkerPoolRequest], cloudbuild.WorkerPool]: + ) -> Callable[[cloudbuild.UpdateWorkerPoolRequest], operations_pb2.Operation]: r"""Return a callable for the update worker pool method over gRPC. - Update a ``WorkerPool``. - - This API is experimental. + Updates a ``WorkerPool``. Returns: Callable[[~.UpdateWorkerPoolRequest], - ~.WorkerPool]: + ~.Operation]: A function that, when called, will call the underlying RPC on the server. """ @@ -724,7 +715,7 @@ def update_worker_pool( self._stubs["update_worker_pool"] = self.grpc_channel.unary_unary( "/google.devtools.cloudbuild.v1.CloudBuild/UpdateWorkerPool", request_serializer=cloudbuild.UpdateWorkerPoolRequest.serialize, - response_deserializer=cloudbuild.WorkerPool.deserialize, + response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["update_worker_pool"] @@ -736,9 +727,7 @@ def list_worker_pools( ]: r"""Return a callable for the list worker pools method over gRPC. - List project's ``WorkerPools``. - - This API is experimental. + Lists ``WorkerPool``\ s. 
Returns: Callable[[~.ListWorkerPoolsRequest], diff --git a/google/cloud/devtools/cloudbuild_v1/services/cloud_build/transports/grpc_asyncio.py b/google/cloud/devtools/cloudbuild_v1/services/cloud_build/transports/grpc_asyncio.py index db2426be..e1b46229 100644 --- a/google/cloud/devtools/cloudbuild_v1/services/cloud_build/transports/grpc_asyncio.py +++ b/google/cloud/devtools/cloudbuild_v1/services/cloud_build/transports/grpc_asyncio.py @@ -637,18 +637,15 @@ def receive_trigger_webhook( def create_worker_pool( self, ) -> Callable[ - [cloudbuild.CreateWorkerPoolRequest], Awaitable[cloudbuild.WorkerPool] + [cloudbuild.CreateWorkerPoolRequest], Awaitable[operations_pb2.Operation] ]: r"""Return a callable for the create worker pool method over gRPC. - Creates a ``WorkerPool`` to run the builds, and returns the new - worker pool. - - This API is experimental. + Creates a ``WorkerPool``. Returns: Callable[[~.CreateWorkerPoolRequest], - Awaitable[~.WorkerPool]]: + Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ @@ -660,7 +657,7 @@ def create_worker_pool( self._stubs["create_worker_pool"] = self.grpc_channel.unary_unary( "/google.devtools.cloudbuild.v1.CloudBuild/CreateWorkerPool", request_serializer=cloudbuild.CreateWorkerPoolRequest.serialize, - response_deserializer=cloudbuild.WorkerPool.deserialize, + response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["create_worker_pool"] @@ -670,9 +667,7 @@ def get_worker_pool( ) -> Callable[[cloudbuild.GetWorkerPoolRequest], Awaitable[cloudbuild.WorkerPool]]: r"""Return a callable for the get worker pool method over gRPC. - Returns information about a ``WorkerPool``. - - This API is experimental. + Returns details of a ``WorkerPool``. Returns: Callable[[~.GetWorkerPoolRequest], @@ -695,16 +690,16 @@ def get_worker_pool( @property def delete_worker_pool( self, - ) -> Callable[[cloudbuild.DeleteWorkerPoolRequest], Awaitable[empty_pb2.Empty]]: + ) -> Callable[ + [cloudbuild.DeleteWorkerPoolRequest], Awaitable[operations_pb2.Operation] + ]: r"""Return a callable for the delete worker pool method over gRPC. - Deletes a ``WorkerPool`` by its project ID and WorkerPool name. - - This API is experimental. + Deletes a ``WorkerPool``. Returns: Callable[[~.DeleteWorkerPoolRequest], - Awaitable[~.Empty]]: + Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ @@ -716,7 +711,7 @@ def delete_worker_pool( self._stubs["delete_worker_pool"] = self.grpc_channel.unary_unary( "/google.devtools.cloudbuild.v1.CloudBuild/DeleteWorkerPool", request_serializer=cloudbuild.DeleteWorkerPoolRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, + response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["delete_worker_pool"] @@ -724,17 +719,15 @@ def delete_worker_pool( def update_worker_pool( self, ) -> Callable[ - [cloudbuild.UpdateWorkerPoolRequest], Awaitable[cloudbuild.WorkerPool] + [cloudbuild.UpdateWorkerPoolRequest], Awaitable[operations_pb2.Operation] ]: r"""Return a callable for the update worker pool method over gRPC. - Update a ``WorkerPool``. - - This API is experimental. + Updates a ``WorkerPool``. Returns: Callable[[~.UpdateWorkerPoolRequest], - Awaitable[~.WorkerPool]]: + Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. 
""" @@ -746,7 +739,7 @@ def update_worker_pool( self._stubs["update_worker_pool"] = self.grpc_channel.unary_unary( "/google.devtools.cloudbuild.v1.CloudBuild/UpdateWorkerPool", request_serializer=cloudbuild.UpdateWorkerPoolRequest.serialize, - response_deserializer=cloudbuild.WorkerPool.deserialize, + response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["update_worker_pool"] @@ -759,9 +752,7 @@ def list_worker_pools( ]: r"""Return a callable for the list worker pools method over gRPC. - List project's ``WorkerPools``. - - This API is experimental. + Lists ``WorkerPool``\ s. Returns: Callable[[~.ListWorkerPoolsRequest], diff --git a/google/cloud/devtools/cloudbuild_v1/types/__init__.py b/google/cloud/devtools/cloudbuild_v1/types/__init__.py index 42004be6..e4a206f0 100644 --- a/google/cloud/devtools/cloudbuild_v1/types/__init__.py +++ b/google/cloud/devtools/cloudbuild_v1/types/__init__.py @@ -25,8 +25,10 @@ CancelBuildRequest, CreateBuildRequest, CreateBuildTriggerRequest, + CreateWorkerPoolOperationMetadata, CreateWorkerPoolRequest, DeleteBuildTriggerRequest, + DeleteWorkerPoolOperationMetadata, DeleteWorkerPoolRequest, FileHashes, GetBuildRequest, @@ -41,7 +43,7 @@ ListBuildTriggersResponse, ListWorkerPoolsRequest, ListWorkerPoolsResponse, - Network, + PrivatePoolV1Config, PubsubConfig, PullRequestFilter, PushFilter, @@ -60,10 +62,10 @@ StorageSourceManifest, TimeSpan, UpdateBuildTriggerRequest, + UpdateWorkerPoolOperationMetadata, UpdateWorkerPoolRequest, Volume, WebhookConfig, - WorkerConfig, WorkerPool, ) @@ -79,8 +81,10 @@ "CancelBuildRequest", "CreateBuildRequest", "CreateBuildTriggerRequest", + "CreateWorkerPoolOperationMetadata", "CreateWorkerPoolRequest", "DeleteBuildTriggerRequest", + "DeleteWorkerPoolOperationMetadata", "DeleteWorkerPoolRequest", "FileHashes", "GetBuildRequest", @@ -95,7 +99,7 @@ "ListBuildTriggersResponse", "ListWorkerPoolsRequest", "ListWorkerPoolsResponse", - "Network", + "PrivatePoolV1Config", "PubsubConfig", "PullRequestFilter", "PushFilter", @@ -114,9 +118,9 @@ "StorageSourceManifest", "TimeSpan", "UpdateBuildTriggerRequest", + "UpdateWorkerPoolOperationMetadata", "UpdateWorkerPoolRequest", "Volume", "WebhookConfig", - "WorkerConfig", "WorkerPool", ) diff --git a/google/cloud/devtools/cloudbuild_v1/types/cloudbuild.py b/google/cloud/devtools/cloudbuild_v1/types/cloudbuild.py index f4013600..b36219af 100644 --- a/google/cloud/devtools/cloudbuild_v1/types/cloudbuild.py +++ b/google/cloud/devtools/cloudbuild_v1/types/cloudbuild.py @@ -17,6 +17,7 @@ from google.api import httpbody_pb2 # type: ignore from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore @@ -66,14 +67,16 @@ "ReceiveTriggerWebhookRequest", "ReceiveTriggerWebhookResponse", "WorkerPool", - "WorkerConfig", - "Network", + "PrivatePoolV1Config", "CreateWorkerPoolRequest", "GetWorkerPoolRequest", "DeleteWorkerPoolRequest", "UpdateWorkerPoolRequest", "ListWorkerPoolsRequest", "ListWorkerPoolsResponse", + "CreateWorkerPoolOperationMetadata", + "UpdateWorkerPoolOperationMetadata", + "DeleteWorkerPoolOperationMetadata", }, ) @@ -1430,11 +1433,13 @@ class BuildOptions(proto.Message): Option to define build log streaming behavior to Google Cloud Storage. worker_pool (str): - Option to specify a ``WorkerPool`` for the build. Format: - projects/{project}/locations/{location}/workerPools/{workerPool} + This field deprecated; please use ``pool.name`` instead. 
+ pool (google.cloud.devtools.cloudbuild_v1.types.BuildOptions.PoolOption): + Optional. Specification for execution on a ``WorkerPool``. - This field is in beta and is available only to restricted - users. + See `running builds in a private + pool `__ + for more information. logging (google.cloud.devtools.cloudbuild_v1.types.BuildOptions.LoggingMode): Option to specify the logging mode, which determines if and where build logs are stored. @@ -1509,6 +1514,25 @@ class LoggingMode(proto.Enum): CLOUD_LOGGING_ONLY = 5 NONE = 4 + class PoolOption(proto.Message): + r"""Details about how a build should be executed on a ``WorkerPool``. + + See `running builds in a private + pool `__ + for more information. + + Attributes: + name (str): + The ``WorkerPool`` resource to execute the build on. You + must have ``cloudbuild.workerpools.use`` on the project + hosting the WorkerPool. + + Format + projects/{project}/locations/{location}/workerPools/{workerPoolId} + """ + + name = proto.Field(proto.STRING, number=1,) + source_provenance_hash = proto.RepeatedField( proto.ENUM, number=1, enum="Hash.HashType", ) @@ -1519,6 +1543,7 @@ class LoggingMode(proto.Enum): dynamic_substitutions = proto.Field(proto.BOOL, number=17,) log_streaming_option = proto.Field(proto.ENUM, number=5, enum=LogStreamingOption,) worker_pool = proto.Field(proto.STRING, number=7,) + pool = proto.Field(proto.MESSAGE, number=19, message=PoolOption,) logging = proto.Field(proto.ENUM, number=11, enum=LoggingMode,) env = proto.RepeatedField(proto.STRING, number=12,) secret_env = proto.RepeatedField(proto.STRING, number=13,) @@ -1560,37 +1585,39 @@ class ReceiveTriggerWebhookResponse(proto.Message): class WorkerPool(proto.Message): - r"""Configuration for a WorkerPool to run the builds. - Workers are machines that Cloud Build uses to run your builds. - By default, all workers run in a project owned by Cloud Build. - To have full control over the workers that execute your builds - -- such as enabling them to access private resources on your - private network -- you can request Cloud Build to run the - workers in your own project by creating a custom workers pool. + r"""Configuration for a ``WorkerPool``. + + Cloud Build owns and maintains a pool of workers for general use and + have no access to a project's private network. By default, builds + submitted to Cloud Build will use a worker from this pool. + + If your build needs access to resources on a private network, create + and use a ``WorkerPool`` to run your builds. Private + ``WorkerPool``\ s give your builds access to any single VPC network + that you administer, including any on-prem resources connected to + that VPC network. For an overview of private pools, see `Private + pools + overview `__. Attributes: name (str): - User-defined name of the ``WorkerPool``. - project_id (str): - The project ID of the GCP project for which the - ``WorkerPool`` is created. - service_account_email (str): - Output only. The service account used to manage the - ``WorkerPool``. The service account must have the Compute - Instance Admin (Beta) permission at the project level. - worker_count (int): - Total number of workers to be created across - all requested regions. - worker_config (google.cloud.devtools.cloudbuild_v1.types.WorkerConfig): - Configuration to be used for a creating workers in the - ``WorkerPool``. - regions (Sequence[google.cloud.devtools.cloudbuild_v1.types.WorkerPool.Region]): - List of regions to create the ``WorkerPool``. Regions can't - be empty. 
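A hedged sketch of how the redefined ``WorkerPool`` resource might be populated before creation, using the ``PrivatePoolV1Config`` documented in this patch; the display name, annotation, machine type, and network values are illustrative placeholders.

from google.cloud.devtools import cloudbuild_v1

# Only client-settable fields are populated; name, uid, timestamps, state,
# and etag are output-only and filled in by the service.
pool = cloudbuild_v1.WorkerPool(
    display_name="example private pool",  # placeholder
    annotations={"team": "builds"},       # placeholder
    private_pool_v1_config=cloudbuild_v1.PrivatePoolV1Config(
        worker_config=cloudbuild_v1.PrivatePoolV1Config.WorkerConfig(
            machine_type="e2-standard-4",  # placeholder
            disk_size_gb=100,
        ),
        network_config=cloudbuild_v1.PrivatePoolV1Config.NetworkConfig(
            peered_network="projects/12345/global/networks/my-vpc",  # placeholder
        ),
    ),
)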
If Cloud Build adds a new GCP region in the - future, the existing ``WorkerPool`` will not be enabled in - the new region automatically; you must add the new region to - the ``regions`` field to enable the ``WorkerPool`` in that - region. + Output only. The resource name of the ``WorkerPool``, with + format + ``projects/{project}/locations/{location}/workerPools/{worker_pool}``. + The value of ``{worker_pool}`` is provided by + ``worker_pool_id`` in ``CreateWorkerPool`` request and the + value of ``{location}`` is determined by the endpoint + accessed. + display_name (str): + A user-specified, human-readable name for the + ``WorkerPool``. If provided, this value must be 1-63 + characters. + uid (str): + Output only. A unique identifier for the ``WorkerPool``. + annotations (Sequence[google.cloud.devtools.cloudbuild_v1.types.WorkerPool.AnnotationsEntry]): + User specified annotations. See + https://google.aip.dev/128#annotations for more + details such as format and size limitations. create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Time at which the request to create the ``WorkerPool`` was received. @@ -1600,106 +1627,102 @@ class WorkerPool(proto.Message): delete_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Time at which the request to delete the ``WorkerPool`` was received. - status (google.cloud.devtools.cloudbuild_v1.types.WorkerPool.Status): - Output only. WorkerPool Status. + state (google.cloud.devtools.cloudbuild_v1.types.WorkerPool.State): + Output only. ``WorkerPool`` state. + private_pool_v1_config (google.cloud.devtools.cloudbuild_v1.types.PrivatePoolV1Config): + Private Pool using a v1 configuration. + etag (str): + Output only. Checksum computed by the server. + May be sent on update and delete requests to + ensure that the client has an up-to-date value + before proceeding. 
""" - class Region(proto.Enum): - r"""Supported GCP regions to create the ``WorkerPool``.""" - REGION_UNSPECIFIED = 0 - US_CENTRAL1 = 1 - US_WEST1 = 2 - US_EAST1 = 3 - US_EAST4 = 4 - - class Status(proto.Enum): - r"""``WorkerPool`` status""" - STATUS_UNSPECIFIED = 0 + class State(proto.Enum): + r"""State of the ``WorkerPool``.""" + STATE_UNSPECIFIED = 0 CREATING = 1 RUNNING = 2 DELETING = 3 DELETED = 4 - name = proto.Field(proto.STRING, number=14,) - project_id = proto.Field(proto.STRING, number=2,) - service_account_email = proto.Field(proto.STRING, number=3,) - worker_count = proto.Field(proto.INT64, number=4,) - worker_config = proto.Field(proto.MESSAGE, number=16, message="WorkerConfig",) - regions = proto.RepeatedField(proto.ENUM, number=9, enum=Region,) - create_time = proto.Field( - proto.MESSAGE, number=11, message=timestamp_pb2.Timestamp, - ) - update_time = proto.Field( - proto.MESSAGE, number=17, message=timestamp_pb2.Timestamp, - ) - delete_time = proto.Field( - proto.MESSAGE, number=12, message=timestamp_pb2.Timestamp, + name = proto.Field(proto.STRING, number=1,) + display_name = proto.Field(proto.STRING, number=2,) + uid = proto.Field(proto.STRING, number=3,) + annotations = proto.MapField(proto.STRING, proto.STRING, number=4,) + create_time = proto.Field(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,) + update_time = proto.Field(proto.MESSAGE, number=6, message=timestamp_pb2.Timestamp,) + delete_time = proto.Field(proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,) + state = proto.Field(proto.ENUM, number=8, enum=State,) + private_pool_v1_config = proto.Field( + proto.MESSAGE, number=12, oneof="config", message="PrivatePoolV1Config", ) - status = proto.Field(proto.ENUM, number=13, enum=Status,) + etag = proto.Field(proto.STRING, number=11,) -class WorkerConfig(proto.Message): - r"""WorkerConfig defines the configuration to be used for a - creating workers in the pool. +class PrivatePoolV1Config(proto.Message): + r"""Configuration for a V1 ``PrivatePool``. Attributes: - machine_type (str): - Machine Type of the worker, such as n1-standard-1. See - https://cloud.google.com/compute/docs/machine-types. If left - blank, Cloud Build will use a standard unspecified machine - to create the worker pool. ``machine_type`` is overridden if - you specify a different machine type in ``build_options``. - In this case, the VM specified in the ``build_options`` will - be created on demand at build time. For more information see - https://cloud.google.com/cloud-build/docs/speeding-up-builds#using_custom_virtual_machine_sizes - disk_size_gb (int): - Size of the disk attached to the worker, in GB. See - https://cloud.google.com/compute/docs/disks/ If ``0`` is - specified, Cloud Build will use a standard disk size. - ``disk_size`` is overridden if you specify a different disk - size in ``build_options``. In this case, a VM with a disk - size specified in the ``build_options`` will be created on - demand at build time. For more information see - https://cloud.google.com/cloud-build/docs/api/reference/rest/v1/projects.builds#buildoptions - network (google.cloud.devtools.cloudbuild_v1.types.Network): - The network definition used to create the worker. If this - section is left empty, the workers will be created in - WorkerPool.project_id on the default network. - tag (str): - The tag applied to the worker, and the same tag used by the - firewall rule. It is used to identify the Cloud Build - workers among other VMs. The default value for tag is - ``worker``. 
+ worker_config (google.cloud.devtools.cloudbuild_v1.types.PrivatePoolV1Config.WorkerConfig): + Machine configuration for the workers in the + pool. + network_config (google.cloud.devtools.cloudbuild_v1.types.PrivatePoolV1Config.NetworkConfig): + Network configuration for the pool. """ - machine_type = proto.Field(proto.STRING, number=1,) - disk_size_gb = proto.Field(proto.INT64, number=2,) - network = proto.Field(proto.MESSAGE, number=3, message="Network",) - tag = proto.Field(proto.STRING, number=4,) + class WorkerConfig(proto.Message): + r"""Defines the configuration to be used for creating workers in + the pool. + Attributes: + machine_type (str): + Machine type of a worker, such as ``e2-medium``. See `Worker + pool config + file `__. + If left blank, Cloud Build will use a sensible default. + disk_size_gb (int): + Size of the disk attached to the worker, in GB. See `Worker + pool config + file `__. + Specify a value of up to 1000. If ``0`` is specified, Cloud + Build will use a standard disk size. + """ -class Network(proto.Message): - r"""Network describes the GCP network used to create workers in. + machine_type = proto.Field(proto.STRING, number=1,) + disk_size_gb = proto.Field(proto.INT64, number=2,) - Attributes: - project_id (str): - Project id containing the defined network and subnetwork. - For a peered VPC, this will be the same as the project_id in - which the workers are created. For a shared VPC, this will - be the project sharing the network with the project_id - project in which workers will be created. For custom workers - with no VPC, this will be the same as project_id. - network (str): - Network on which the workers are created. - "default" network is used if empty. - subnetwork (str): - Subnetwork on which the workers are created. - "default" subnetwork is used if empty. - """ + class NetworkConfig(proto.Message): + r"""Defines the network configuration for the pool. + Attributes: + peered_network (str): + Required. Immutable. The network definition that the workers + are peered to. If this section is left empty, the workers + will be peered to ``WorkerPool.project_id`` on the service + producer network. Must be in the format + ``projects/{project}/global/networks/{network}``, where + ``{project}`` is a project number, such as ``12345``, and + ``{network}`` is the name of a VPC network in the project. + See `Understanding network configuration + options `__ + egress_option (google.cloud.devtools.cloudbuild_v1.types.PrivatePoolV1Config.NetworkConfig.EgressOption): + Option to configure network egress for the + workers. + """ - project_id = proto.Field(proto.STRING, number=1,) - network = proto.Field(proto.STRING, number=2,) - subnetwork = proto.Field(proto.STRING, number=3,) + class EgressOption(proto.Enum): + r"""Defines the egress option for the pool.""" + EGRESS_OPTION_UNSPECIFIED = 0 + NO_PUBLIC_EGRESS = 1 + PUBLIC_EGRESS = 2 + + peered_network = proto.Field(proto.STRING, number=1,) + egress_option = proto.Field( + proto.ENUM, number=2, enum="PrivatePoolV1Config.NetworkConfig.EgressOption", + ) + + worker_config = proto.Field(proto.MESSAGE, number=1, message=WorkerConfig,) + network_config = proto.Field(proto.MESSAGE, number=2, message=NetworkConfig,) class CreateWorkerPoolRequest(proto.Message): @@ -1707,13 +1730,26 @@ class CreateWorkerPoolRequest(proto.Message): Attributes: parent (str): - ID of the parent project. + Required. The parent resource where this worker pool will be + created. Format: + ``projects/{project}/locations/{location}``. 
worker_pool (google.cloud.devtools.cloudbuild_v1.types.WorkerPool): - ``WorkerPool`` resource to create. + Required. ``WorkerPool`` resource to create. + worker_pool_id (str): + Required. Immutable. The ID to use for the ``WorkerPool``, + which will become the final component of the resource name. + + This value should be 1-63 characters, and valid characters + are /[a-z][0-9]-/. + validate_only (bool): + If set, validate the request and preview the + response, but do not actually post it. """ parent = proto.Field(proto.STRING, number=1,) worker_pool = proto.Field(proto.MESSAGE, number=2, message="WorkerPool",) + worker_pool_id = proto.Field(proto.STRING, number=3,) + validate_only = proto.Field(proto.BOOL, number=4,) class GetWorkerPoolRequest(proto.Message): @@ -1721,9 +1757,9 @@ class GetWorkerPoolRequest(proto.Message): Attributes: name (str): - The field will contain name of the resource - requested, for example: - "projects/project-1/workerPools/workerpool-name". + Required. The name of the ``WorkerPool`` to retrieve. + Format: + ``projects/{project}/locations/{location}/workerPools/{workerPool}``. """ name = proto.Field(proto.STRING, number=1,) @@ -1734,39 +1770,70 @@ class DeleteWorkerPoolRequest(proto.Message): Attributes: name (str): - The field will contain name of the resource - requested, for example: - "projects/project-1/workerPools/workerpool-name". + Required. The name of the ``WorkerPool`` to delete. Format: + ``projects/{project}/locations/{workerPool}/workerPools/{workerPool}``. + etag (str): + Optional. If this is provided, it must match + the server's etag on the workerpool for the + request to be processed. + allow_missing (bool): + If set to true, and the ``WorkerPool`` is not found, the + request will succeed but no action will be taken on the + server. + validate_only (bool): + If set, validate the request and preview the + response, but do not actually post it. """ name = proto.Field(proto.STRING, number=1,) + etag = proto.Field(proto.STRING, number=2,) + allow_missing = proto.Field(proto.BOOL, number=3,) + validate_only = proto.Field(proto.BOOL, number=4,) class UpdateWorkerPoolRequest(proto.Message): r"""Request to update a ``WorkerPool``. Attributes: - name (str): - The field will contain name of the resource - requested, for example: - "projects/project-1/workerPools/workerpool-name". worker_pool (google.cloud.devtools.cloudbuild_v1.types.WorkerPool): - ``WorkerPool`` resource to update. + Required. The ``WorkerPool`` to update. + + The ``name`` field is used to identify the ``WorkerPool`` to + update. Format: + ``projects/{project}/locations/{location}/workerPools/{workerPool}``. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + A mask specifying which fields in ``worker_pool`` to update. + validate_only (bool): + If set, validate the request and preview the + response, but do not actually post it. """ - name = proto.Field(proto.STRING, number=2,) - worker_pool = proto.Field(proto.MESSAGE, number=3, message="WorkerPool",) + worker_pool = proto.Field(proto.MESSAGE, number=1, message="WorkerPool",) + update_mask = proto.Field( + proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask, + ) + validate_only = proto.Field(proto.BOOL, number=4,) class ListWorkerPoolsRequest(proto.Message): - r"""Request to list ``WorkerPools``. + r"""Request to list ``WorkerPool``\ s. Attributes: parent (str): - ID of the parent project. + Required. The parent of the collection of ``WorkerPools``. + Format: ``projects/{project}/locations/{location}``. 
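One more illustrative sketch, showing how the update and delete surfaces described above might be driven; both now return long-running operations whose progress is reported through the ``*WorkerPoolOperationMetadata`` messages defined further down. All resource names are placeholders.

from google.cloud.devtools import cloudbuild_v1
from google.protobuf import field_mask_pb2

client = cloudbuild_v1.CloudBuildClient()
pool_name = "projects/my-project/locations/us-central1/workerPools/my-pool"  # placeholder

# Update only the display name; update_mask limits which fields are touched.
update_op = client.update_worker_pool(
    worker_pool=cloudbuild_v1.WorkerPool(name=pool_name, display_name="renamed pool"),
    update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
)
updated = update_op.result()

# Delete the pool, guarding against concurrent modification with the etag.
delete_op = client.delete_worker_pool(
    request=cloudbuild_v1.DeleteWorkerPoolRequest(
        name=pool_name, etag=updated.etag, allow_missing=True,
    )
)
delete_op.result()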
+ page_size (int): + The maximum number of ``WorkerPool``\ s to return. The + service may return fewer than this value. If omitted, the + server will use a sensible default. + page_token (str): + A page token, received from a previous ``ListWorkerPools`` + call. Provide this to retrieve the subsequent page. """ parent = proto.Field(proto.STRING, number=1,) + page_size = proto.Field(proto.INT32, number=2,) + page_token = proto.Field(proto.STRING, number=3,) class ListWorkerPoolsResponse(proto.Message): @@ -1774,10 +1841,82 @@ class ListWorkerPoolsResponse(proto.Message): Attributes: worker_pools (Sequence[google.cloud.devtools.cloudbuild_v1.types.WorkerPool]): - ``WorkerPools`` for the project. + ``WorkerPools`` for the specified project. + next_page_token (str): + Continuation token used to page through large + result sets. Provide this value in a subsequent + ListWorkerPoolsRequest to return the next page + of results. """ + @property + def raw_page(self): + return self + worker_pools = proto.RepeatedField(proto.MESSAGE, number=1, message="WorkerPool",) + next_page_token = proto.Field(proto.STRING, number=2,) + + +class CreateWorkerPoolOperationMetadata(proto.Message): + r"""Metadata for the ``CreateWorkerPool`` operation. + + Attributes: + worker_pool (str): + The resource name of the ``WorkerPool`` to create. Format: + ``projects/{project}/locations/{location}/workerPools/{worker_pool}``. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Time the operation was created. + complete_time (google.protobuf.timestamp_pb2.Timestamp): + Time the operation was completed. + """ + + worker_pool = proto.Field(proto.STRING, number=1,) + create_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,) + complete_time = proto.Field( + proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp, + ) + + +class UpdateWorkerPoolOperationMetadata(proto.Message): + r"""Metadata for the ``UpdateWorkerPool`` operation. + + Attributes: + worker_pool (str): + The resource name of the ``WorkerPool`` being updated. + Format: + ``projects/{project}/locations/{location}/workerPools/{worker_pool}``. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Time the operation was created. + complete_time (google.protobuf.timestamp_pb2.Timestamp): + Time the operation was completed. + """ + + worker_pool = proto.Field(proto.STRING, number=1,) + create_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,) + complete_time = proto.Field( + proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp, + ) + + +class DeleteWorkerPoolOperationMetadata(proto.Message): + r"""Metadata for the ``DeleteWorkerPool`` operation. + + Attributes: + worker_pool (str): + The resource name of the ``WorkerPool`` being deleted. + Format: + ``projects/{project}/locations/{location}/workerPools/{worker_pool}``. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Time the operation was created. + complete_time (google.protobuf.timestamp_pb2.Timestamp): + Time the operation was completed. 
+ """ + + worker_pool = proto.Field(proto.STRING, number=1,) + create_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,) + complete_time = proto.Field( + proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owlbot.py b/owlbot.py index 5eccb501..e2969cf7 100644 --- a/owlbot.py +++ b/owlbot.py @@ -30,6 +30,12 @@ Attributes:""", ) + # Work around sphinx docs issue + s.replace(library / f"google/devtools/cloudbuild_{library.name}/services/cloud_build/*client.py", + "`WorkerPool`s.", + r"`WorkerPool`\\s.", + ) + # Fix namespace s.replace( library / f"google/devtools/**/*.py", @@ -47,13 +53,6 @@ f"google.cloud.devtools.cloudbuild_{library.name}", ) - # Rename package to `google-cloud-build` - s.replace( - [library / "**/*.rst", library / "*/**/*.py", library / "**/*.md"], - "google-cloud-devtools-cloudbuild", - "google-cloud-build" - ) - s.move(library / "google/devtools/cloudbuild", "google/cloud/devtools/cloudbuild") s.move( library / f"google/devtools/cloudbuild_{library.name}", @@ -75,14 +74,4 @@ ) s.move(templated_files, excludes=[".coveragerc"]) # microgenerator has a good .coveragerc file -# TODO(busunkim): Use latest sphinx after microgenerator transition -s.replace("noxfile.py", """['"]sphinx['"]""", '"sphinx<3.0.0"') - -s.replace( - "noxfile.py", - "google.cloud.cloudbuild", - "google.cloud.devtools.cloudbuild", -) - - s.shell.run(["nox", "-s", "blacken"], hide_output=False) diff --git a/scripts/fixup_cloudbuild_v1_keywords.py b/scripts/fixup_cloudbuild_v1_keywords.py index 32908765..99a089f1 100644 --- a/scripts/fixup_cloudbuild_v1_keywords.py +++ b/scripts/fixup_cloudbuild_v1_keywords.py @@ -42,20 +42,20 @@ class cloudbuildCallTransformer(cst.CSTTransformer): 'cancel_build': ('project_id', 'id', 'name', ), 'create_build': ('project_id', 'build', 'parent', ), 'create_build_trigger': ('project_id', 'trigger', 'parent', ), - 'create_worker_pool': ('parent', 'worker_pool', ), + 'create_worker_pool': ('parent', 'worker_pool', 'worker_pool_id', 'validate_only', ), 'delete_build_trigger': ('project_id', 'trigger_id', 'name', ), - 'delete_worker_pool': ('name', ), + 'delete_worker_pool': ('name', 'etag', 'allow_missing', 'validate_only', ), 'get_build': ('project_id', 'id', 'name', ), 'get_build_trigger': ('project_id', 'trigger_id', 'name', ), 'get_worker_pool': ('name', ), 'list_builds': ('project_id', 'parent', 'page_size', 'page_token', 'filter', ), 'list_build_triggers': ('project_id', 'parent', 'page_size', 'page_token', ), - 'list_worker_pools': ('parent', ), + 'list_worker_pools': ('parent', 'page_size', 'page_token', ), 'receive_trigger_webhook': ('name', 'body', 'project_id', 'trigger', 'secret', ), 'retry_build': ('project_id', 'id', 'name', ), 'run_build_trigger': ('project_id', 'trigger_id', 'name', 'source', ), 'update_build_trigger': ('project_id', 'trigger_id', 'trigger', ), - 'update_worker_pool': ('name', 'worker_pool', ), + 'update_worker_pool': ('worker_pool', 'update_mask', 'validate_only', ), } def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: diff --git a/tests/unit/gapic/cloudbuild_v1/test_cloud_build.py b/tests/unit/gapic/cloudbuild_v1/test_cloud_build.py index 8dcaaee8..e26b4d9b 100644 --- a/tests/unit/gapic/cloudbuild_v1/test_cloud_build.py +++ b/tests/unit/gapic/cloudbuild_v1/test_cloud_build.py @@ -49,6 +49,7 @@ from google.oauth2 import service_account from google.protobuf import any_pb2 # type: ignore from google.protobuf import duration_pb2 
# type: ignore +from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore import google.auth @@ -2810,14 +2811,7 @@ def test_create_worker_pool( type(client.transport.create_worker_pool), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = cloudbuild.WorkerPool( - name="name_value", - project_id="project_id_value", - service_account_email="service_account_email_value", - worker_count=1314, - regions=[cloudbuild.WorkerPool.Region.US_CENTRAL1], - status=cloudbuild.WorkerPool.Status.CREATING, - ) + call.return_value = operations_pb2.Operation(name="operations/spam") response = client.create_worker_pool(request) # Establish that the underlying gRPC stub method was called. @@ -2826,13 +2820,7 @@ def test_create_worker_pool( assert args[0] == cloudbuild.CreateWorkerPoolRequest() # Establish that the response is the type that we expect. - assert isinstance(response, cloudbuild.WorkerPool) - assert response.name == "name_value" - assert response.project_id == "project_id_value" - assert response.service_account_email == "service_account_email_value" - assert response.worker_count == 1314 - assert response.regions == [cloudbuild.WorkerPool.Region.US_CENTRAL1] - assert response.status == cloudbuild.WorkerPool.Status.CREATING + assert isinstance(response, future.Future) def test_create_worker_pool_from_dict(): @@ -2874,14 +2862,7 @@ async def test_create_worker_pool_async( ) as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - cloudbuild.WorkerPool( - name="name_value", - project_id="project_id_value", - service_account_email="service_account_email_value", - worker_count=1314, - regions=[cloudbuild.WorkerPool.Region.US_CENTRAL1], - status=cloudbuild.WorkerPool.Status.CREATING, - ) + operations_pb2.Operation(name="operations/spam") ) response = await client.create_worker_pool(request) @@ -2891,13 +2872,7 @@ async def test_create_worker_pool_async( assert args[0] == cloudbuild.CreateWorkerPoolRequest() # Establish that the response is the type that we expect. - assert isinstance(response, cloudbuild.WorkerPool) - assert response.name == "name_value" - assert response.project_id == "project_id_value" - assert response.service_account_email == "service_account_email_value" - assert response.worker_count == 1314 - assert response.regions == [cloudbuild.WorkerPool.Region.US_CENTRAL1] - assert response.status == cloudbuild.WorkerPool.Status.CREATING + assert isinstance(response, future.Future) @pytest.mark.asyncio @@ -2905,6 +2880,147 @@ async def test_create_worker_pool_async_from_dict(): await test_create_worker_pool_async(request_type=dict) +def test_create_worker_pool_field_headers(): + client = CloudBuildClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cloudbuild.CreateWorkerPoolRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_worker_pool), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_worker_pool(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_worker_pool_field_headers_async(): + client = CloudBuildAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cloudbuild.CreateWorkerPoolRequest() + + request.parent = "parent/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_worker_pool), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.create_worker_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"] + + +def test_create_worker_pool_flattened(): + client = CloudBuildClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_worker_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_worker_pool( + parent="parent_value", + worker_pool=cloudbuild.WorkerPool(name="name_value"), + worker_pool_id="worker_pool_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + assert args[0].worker_pool == cloudbuild.WorkerPool(name="name_value") + assert args[0].worker_pool_id == "worker_pool_id_value" + + +def test_create_worker_pool_flattened_error(): + client = CloudBuildClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_worker_pool( + cloudbuild.CreateWorkerPoolRequest(), + parent="parent_value", + worker_pool=cloudbuild.WorkerPool(name="name_value"), + worker_pool_id="worker_pool_id_value", + ) + + +@pytest.mark.asyncio +async def test_create_worker_pool_flattened_async(): + client = CloudBuildAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_worker_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.create_worker_pool( + parent="parent_value", + worker_pool=cloudbuild.WorkerPool(name="name_value"), + worker_pool_id="worker_pool_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" + assert args[0].worker_pool == cloudbuild.WorkerPool(name="name_value") + assert args[0].worker_pool_id == "worker_pool_id_value" + + +@pytest.mark.asyncio +async def test_create_worker_pool_flattened_error_async(): + client = CloudBuildAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_worker_pool( + cloudbuild.CreateWorkerPoolRequest(), + parent="parent_value", + worker_pool=cloudbuild.WorkerPool(name="name_value"), + worker_pool_id="worker_pool_id_value", + ) + + def test_get_worker_pool( transport: str = "grpc", request_type=cloudbuild.GetWorkerPoolRequest ): @@ -2921,11 +3037,15 @@ def test_get_worker_pool( # Designate an appropriate return value for the call. call.return_value = cloudbuild.WorkerPool( name="name_value", - project_id="project_id_value", - service_account_email="service_account_email_value", - worker_count=1314, - regions=[cloudbuild.WorkerPool.Region.US_CENTRAL1], - status=cloudbuild.WorkerPool.Status.CREATING, + display_name="display_name_value", + uid="uid_value", + state=cloudbuild.WorkerPool.State.CREATING, + etag="etag_value", + private_pool_v1_config=cloudbuild.PrivatePoolV1Config( + worker_config=cloudbuild.PrivatePoolV1Config.WorkerConfig( + machine_type="machine_type_value" + ) + ), ) response = client.get_worker_pool(request) @@ -2937,11 +3057,10 @@ def test_get_worker_pool( # Establish that the response is the type that we expect. assert isinstance(response, cloudbuild.WorkerPool) assert response.name == "name_value" - assert response.project_id == "project_id_value" - assert response.service_account_email == "service_account_email_value" - assert response.worker_count == 1314 - assert response.regions == [cloudbuild.WorkerPool.Region.US_CENTRAL1] - assert response.status == cloudbuild.WorkerPool.Status.CREATING + assert response.display_name == "display_name_value" + assert response.uid == "uid_value" + assert response.state == cloudbuild.WorkerPool.State.CREATING + assert response.etag == "etag_value" def test_get_worker_pool_from_dict(): @@ -2981,11 +3100,10 @@ async def test_get_worker_pool_async( call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( cloudbuild.WorkerPool( name="name_value", - project_id="project_id_value", - service_account_email="service_account_email_value", - worker_count=1314, - regions=[cloudbuild.WorkerPool.Region.US_CENTRAL1], - status=cloudbuild.WorkerPool.Status.CREATING, + display_name="display_name_value", + uid="uid_value", + state=cloudbuild.WorkerPool.State.CREATING, + etag="etag_value", ) ) response = await client.get_worker_pool(request) @@ -2998,11 +3116,10 @@ async def test_get_worker_pool_async( # Establish that the response is the type that we expect. 
assert isinstance(response, cloudbuild.WorkerPool) assert response.name == "name_value" - assert response.project_id == "project_id_value" - assert response.service_account_email == "service_account_email_value" - assert response.worker_count == 1314 - assert response.regions == [cloudbuild.WorkerPool.Region.US_CENTRAL1] - assert response.status == cloudbuild.WorkerPool.Status.CREATING + assert response.display_name == "display_name_value" + assert response.uid == "uid_value" + assert response.state == cloudbuild.WorkerPool.State.CREATING + assert response.etag == "etag_value" @pytest.mark.asyncio @@ -3010,6 +3127,121 @@ async def test_get_worker_pool_async_from_dict(): await test_get_worker_pool_async(request_type=dict) +def test_get_worker_pool_field_headers(): + client = CloudBuildClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cloudbuild.GetWorkerPoolRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_worker_pool), "__call__") as call: + call.return_value = cloudbuild.WorkerPool() + client.get_worker_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_worker_pool_field_headers_async(): + client = CloudBuildAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cloudbuild.GetWorkerPoolRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_worker_pool), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cloudbuild.WorkerPool() + ) + await client.get_worker_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_get_worker_pool_flattened(): + client = CloudBuildClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_worker_pool), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cloudbuild.WorkerPool() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_worker_pool(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +def test_get_worker_pool_flattened_error(): + client = CloudBuildClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_worker_pool( + cloudbuild.GetWorkerPoolRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_worker_pool_flattened_async(): + client = CloudBuildAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_worker_pool), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = cloudbuild.WorkerPool() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + cloudbuild.WorkerPool() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_worker_pool(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_get_worker_pool_flattened_error_async(): + client = CloudBuildAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_worker_pool( + cloudbuild.GetWorkerPoolRequest(), name="name_value", + ) + + def test_delete_worker_pool( transport: str = "grpc", request_type=cloudbuild.DeleteWorkerPoolRequest ): @@ -3026,7 +3258,7 @@ def test_delete_worker_pool( type(client.transport.delete_worker_pool), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = None + call.return_value = operations_pb2.Operation(name="operations/spam") response = client.delete_worker_pool(request) # Establish that the underlying gRPC stub method was called. @@ -3035,7 +3267,7 @@ def test_delete_worker_pool( assert args[0] == cloudbuild.DeleteWorkerPoolRequest() # Establish that the response is the type that we expect. - assert response is None + assert isinstance(response, future.Future) def test_delete_worker_pool_from_dict(): @@ -3076,7 +3308,9 @@ async def test_delete_worker_pool_async( type(client.transport.delete_worker_pool), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) response = await client.delete_worker_pool(request) # Establish that the underlying gRPC stub method was called. @@ -3085,7 +3319,7 @@ async def test_delete_worker_pool_async( assert args[0] == cloudbuild.DeleteWorkerPoolRequest() # Establish that the response is the type that we expect. - assert response is None + assert isinstance(response, future.Future) @pytest.mark.asyncio @@ -3093,6 +3327,129 @@ async def test_delete_worker_pool_async_from_dict(): await test_delete_worker_pool_async(request_type=dict) +def test_delete_worker_pool_field_headers(): + client = CloudBuildClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cloudbuild.DeleteWorkerPoolRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_worker_pool), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_worker_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_worker_pool_field_headers_async(): + client = CloudBuildAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cloudbuild.DeleteWorkerPoolRequest() + + request.name = "name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_worker_pool), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.delete_worker_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] + + +def test_delete_worker_pool_flattened(): + client = CloudBuildClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_worker_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_worker_pool(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +def test_delete_worker_pool_flattened_error(): + client = CloudBuildClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_worker_pool( + cloudbuild.DeleteWorkerPoolRequest(), name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_worker_pool_flattened_async(): + client = CloudBuildAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_worker_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_worker_pool(name="name_value",) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].name == "name_value" + + +@pytest.mark.asyncio +async def test_delete_worker_pool_flattened_error_async(): + client = CloudBuildAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_worker_pool( + cloudbuild.DeleteWorkerPoolRequest(), name="name_value", + ) + + def test_update_worker_pool( transport: str = "grpc", request_type=cloudbuild.UpdateWorkerPoolRequest ): @@ -3109,14 +3466,7 @@ def test_update_worker_pool( type(client.transport.update_worker_pool), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = cloudbuild.WorkerPool( - name="name_value", - project_id="project_id_value", - service_account_email="service_account_email_value", - worker_count=1314, - regions=[cloudbuild.WorkerPool.Region.US_CENTRAL1], - status=cloudbuild.WorkerPool.Status.CREATING, - ) + call.return_value = operations_pb2.Operation(name="operations/spam") response = client.update_worker_pool(request) # Establish that the underlying gRPC stub method was called. @@ -3125,13 +3475,7 @@ def test_update_worker_pool( assert args[0] == cloudbuild.UpdateWorkerPoolRequest() # Establish that the response is the type that we expect. - assert isinstance(response, cloudbuild.WorkerPool) - assert response.name == "name_value" - assert response.project_id == "project_id_value" - assert response.service_account_email == "service_account_email_value" - assert response.worker_count == 1314 - assert response.regions == [cloudbuild.WorkerPool.Region.US_CENTRAL1] - assert response.status == cloudbuild.WorkerPool.Status.CREATING + assert isinstance(response, future.Future) def test_update_worker_pool_from_dict(): @@ -3173,35 +3517,161 @@ async def test_update_worker_pool_async( ) as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - cloudbuild.WorkerPool( - name="name_value", - project_id="project_id_value", - service_account_email="service_account_email_value", - worker_count=1314, - regions=[cloudbuild.WorkerPool.Region.US_CENTRAL1], - status=cloudbuild.WorkerPool.Status.CREATING, - ) + operations_pb2.Operation(name="operations/spam") + ) + response = await client.update_worker_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == cloudbuild.UpdateWorkerPoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_worker_pool_async_from_dict(): + await test_update_worker_pool_async(request_type=dict) + + +def test_update_worker_pool_field_headers(): + client = CloudBuildClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cloudbuild.UpdateWorkerPoolRequest() + + request.worker_pool.name = "worker_pool.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_worker_pool), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_worker_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "worker_pool.name=worker_pool.name/value",) in kw[ + "metadata" + ] + + +@pytest.mark.asyncio +async def test_update_worker_pool_field_headers_async(): + client = CloudBuildAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cloudbuild.UpdateWorkerPoolRequest() + + request.worker_pool.name = "worker_pool.name/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_worker_pool), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.update_worker_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ("x-goog-request-params", "worker_pool.name=worker_pool.name/value",) in kw[ + "metadata" + ] + + +def test_update_worker_pool_flattened(): + client = CloudBuildClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_worker_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_worker_pool( + worker_pool=cloudbuild.WorkerPool(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].worker_pool == cloudbuild.WorkerPool(name="name_value") + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"]) + + +def test_update_worker_pool_flattened_error(): + client = CloudBuildClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_worker_pool( + cloudbuild.UpdateWorkerPoolRequest(), + worker_pool=cloudbuild.WorkerPool(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_worker_pool_flattened_async(): + client = CloudBuildAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_worker_pool), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_worker_pool( + worker_pool=cloudbuild.WorkerPool(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), ) - response = await client.update_worker_pool(request) - # Establish that the underlying gRPC stub method was called. + # Establish that the underlying call was made with the expected + # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - assert args[0] == cloudbuild.UpdateWorkerPoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cloudbuild.WorkerPool) - assert response.name == "name_value" - assert response.project_id == "project_id_value" - assert response.service_account_email == "service_account_email_value" - assert response.worker_count == 1314 - assert response.regions == [cloudbuild.WorkerPool.Region.US_CENTRAL1] - assert response.status == cloudbuild.WorkerPool.Status.CREATING + assert args[0].worker_pool == cloudbuild.WorkerPool(name="name_value") + assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"]) @pytest.mark.asyncio -async def test_update_worker_pool_async_from_dict(): - await test_update_worker_pool_async(request_type=dict) +async def test_update_worker_pool_flattened_error_async(): + client = CloudBuildAsyncClient(credentials=ga_credentials.AnonymousCredentials(),) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_worker_pool( + cloudbuild.UpdateWorkerPoolRequest(), + worker_pool=cloudbuild.WorkerPool(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) def test_list_worker_pools( @@ -3220,7 +3690,9 @@ def test_list_worker_pools( type(client.transport.list_worker_pools), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = cloudbuild.ListWorkerPoolsResponse() + call.return_value = cloudbuild.ListWorkerPoolsResponse( + next_page_token="next_page_token_value", + ) response = client.list_worker_pools(request) # Establish that the underlying gRPC stub method was called. @@ -3229,7 +3701,8 @@ def test_list_worker_pools( assert args[0] == cloudbuild.ListWorkerPoolsRequest() # Establish that the response is the type that we expect. - assert isinstance(response, cloudbuild.ListWorkerPoolsResponse) + assert isinstance(response, pagers.ListWorkerPoolsPager) + assert response.next_page_token == "next_page_token_value" def test_list_worker_pools_from_dict(): @@ -3271,7 +3744,7 @@ async def test_list_worker_pools_async( ) as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - cloudbuild.ListWorkerPoolsResponse() + cloudbuild.ListWorkerPoolsResponse(next_page_token="next_page_token_value",) ) response = await client.list_worker_pools(request) @@ -3281,7 +3754,8 @@ async def test_list_worker_pools_async( assert args[0] == cloudbuild.ListWorkerPoolsRequest() # Establish that the response is the type that we expect. 
-    assert isinstance(response, cloudbuild.ListWorkerPoolsResponse)
+    assert isinstance(response, pagers.ListWorkerPoolsAsyncPager)
+    assert response.next_page_token == "next_page_token_value"
 
 
 @pytest.mark.asyncio
@@ -3289,6 +3763,275 @@ async def test_list_worker_pools_async_from_dict():
     await test_list_worker_pools_async(request_type=dict)
 
 
+def test_list_worker_pools_field_headers():
+    client = CloudBuildClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cloudbuild.ListWorkerPoolsRequest()
+
+    request.parent = "parent/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_worker_pools), "__call__"
+    ) as call:
+        call.return_value = cloudbuild.ListWorkerPoolsResponse()
+        client.list_worker_pools(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_worker_pools_field_headers_async():
+    client = CloudBuildAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cloudbuild.ListWorkerPoolsRequest()
+
+    request.parent = "parent/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_worker_pools), "__call__"
+    ) as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            cloudbuild.ListWorkerPoolsResponse()
+        )
+        await client.list_worker_pools(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+def test_list_worker_pools_flattened():
+    client = CloudBuildClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_worker_pools), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cloudbuild.ListWorkerPoolsResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_worker_pools(parent="parent_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == "parent_value"
+
+
+def test_list_worker_pools_flattened_error():
+    client = CloudBuildClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_worker_pools(
+            cloudbuild.ListWorkerPoolsRequest(), parent="parent_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_worker_pools_flattened_async():
+    client = CloudBuildAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_worker_pools), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cloudbuild.ListWorkerPoolsResponse()
+
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            cloudbuild.ListWorkerPoolsResponse()
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_worker_pools(parent="parent_value",)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == "parent_value"
+
+
+@pytest.mark.asyncio
+async def test_list_worker_pools_flattened_error_async():
+    client = CloudBuildAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_worker_pools(
+            cloudbuild.ListWorkerPoolsRequest(), parent="parent_value",
+        )
+
+
+def test_list_worker_pools_pager():
+    client = CloudBuildClient(credentials=ga_credentials.AnonymousCredentials,)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_worker_pools), "__call__"
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            cloudbuild.ListWorkerPoolsResponse(
+                worker_pools=[
+                    cloudbuild.WorkerPool(),
+                    cloudbuild.WorkerPool(),
+                    cloudbuild.WorkerPool(),
+                ],
+                next_page_token="abc",
+            ),
+            cloudbuild.ListWorkerPoolsResponse(worker_pools=[], next_page_token="def",),
+            cloudbuild.ListWorkerPoolsResponse(
+                worker_pools=[cloudbuild.WorkerPool(),], next_page_token="ghi",
+            ),
+            cloudbuild.ListWorkerPoolsResponse(
+                worker_pools=[cloudbuild.WorkerPool(), cloudbuild.WorkerPool(),],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+        )
+        pager = client.list_worker_pools(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, cloudbuild.WorkerPool) for i in results)
+
+
+def test_list_worker_pools_pages():
+    client = CloudBuildClient(credentials=ga_credentials.AnonymousCredentials,)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_worker_pools), "__call__"
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            cloudbuild.ListWorkerPoolsResponse(
+                worker_pools=[
+                    cloudbuild.WorkerPool(),
+                    cloudbuild.WorkerPool(),
+                    cloudbuild.WorkerPool(),
+                ],
+                next_page_token="abc",
+            ),
+            cloudbuild.ListWorkerPoolsResponse(worker_pools=[], next_page_token="def",),
+            cloudbuild.ListWorkerPoolsResponse(
+                worker_pools=[cloudbuild.WorkerPool(),], next_page_token="ghi",
+            ),
+            cloudbuild.ListWorkerPoolsResponse(
+                worker_pools=[cloudbuild.WorkerPool(), cloudbuild.WorkerPool(),],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_worker_pools(request={}).pages)
+        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_worker_pools_async_pager():
+    client = CloudBuildAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_worker_pools),
+        "__call__",
+        new_callable=mock.AsyncMock,
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            cloudbuild.ListWorkerPoolsResponse(
+                worker_pools=[
+                    cloudbuild.WorkerPool(),
+                    cloudbuild.WorkerPool(),
+                    cloudbuild.WorkerPool(),
+                ],
+                next_page_token="abc",
+            ),
+            cloudbuild.ListWorkerPoolsResponse(worker_pools=[], next_page_token="def",),
+            cloudbuild.ListWorkerPoolsResponse(
+                worker_pools=[cloudbuild.WorkerPool(),], next_page_token="ghi",
+            ),
+            cloudbuild.ListWorkerPoolsResponse(
+                worker_pools=[cloudbuild.WorkerPool(), cloudbuild.WorkerPool(),],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_worker_pools(request={},)
+        assert async_pager.next_page_token == "abc"
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, cloudbuild.WorkerPool) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_worker_pools_async_pages():
+    client = CloudBuildAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.list_worker_pools),
+        "__call__",
+        new_callable=mock.AsyncMock,
+    ) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            cloudbuild.ListWorkerPoolsResponse(
+                worker_pools=[
+                    cloudbuild.WorkerPool(),
+                    cloudbuild.WorkerPool(),
+                    cloudbuild.WorkerPool(),
+                ],
+                next_page_token="abc",
+            ),
+            cloudbuild.ListWorkerPoolsResponse(worker_pools=[], next_page_token="def",),
+            cloudbuild.ListWorkerPoolsResponse(
+                worker_pools=[cloudbuild.WorkerPool(),], next_page_token="ghi",
+            ),
+            cloudbuild.ListWorkerPoolsResponse(
+                worker_pools=[cloudbuild.WorkerPool(), cloudbuild.WorkerPool(),],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_worker_pools(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+            assert page_.raw_page.next_page_token == token
+
+
 def test_credentials_transport_error():
     # It is an error to provide credentials and a transport instance.
     transport = transports.CloudBuildGrpcTransport(
@@ -3830,10 +4573,32 @@ def test_parse_crypto_key_path():
     assert expected == actual
 
 
-def test_secret_version_path():
+def test_network_path():
     project = "oyster"
-    secret = "nudibranch"
-    version = "cuttlefish"
+    network = "nudibranch"
+    expected = "projects/{project}/global/networks/{network}".format(
+        project=project, network=network,
+    )
+    actual = CloudBuildClient.network_path(project, network)
+    assert expected == actual
+
+
+def test_parse_network_path():
+    expected = {
+        "project": "cuttlefish",
+        "network": "mussel",
+    }
+    path = CloudBuildClient.network_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = CloudBuildClient.parse_network_path(path)
+    assert expected == actual
+
+
+def test_secret_version_path():
+    project = "winkle"
+    secret = "nautilus"
+    version = "scallop"
     expected = "projects/{project}/secrets/{secret}/versions/{version}".format(
         project=project, secret=secret, version=version,
     )
@@ -3843,9 +4608,9 @@ def test_secret_version_path():
 
 def test_parse_secret_version_path():
     expected = {
-        "project": "mussel",
-        "secret": "winkle",
-        "version": "nautilus",
+        "project": "abalone",
+        "secret": "squid",
+        "version": "clam",
     }
     path = CloudBuildClient.secret_version_path(**expected)
 
@@ -3855,8 +4620,8 @@ def test_parse_secret_version_path():
 
 
 def test_service_account_path():
-    project = "scallop"
-    service_account = "abalone"
+    project = "whelk"
+    service_account = "octopus"
     expected = "projects/{project}/serviceAccounts/{service_account}".format(
         project=project, service_account=service_account,
     )
@@ -3866,8 +4631,8 @@ def test_service_account_path():
 
 def test_parse_service_account_path():
    expected = {
-        "project": "squid",
-        "service_account": "clam",
+        "project": "oyster",
+        "service_account": "nudibranch",
     }
     path = CloudBuildClient.service_account_path(**expected)
 
@@ -3877,8 +4642,8 @@ def test_parse_service_account_path():
 
 
 def test_subscription_path():
-    project = "whelk"
-    subscription = "octopus"
+    project = "cuttlefish"
+    subscription = "mussel"
     expected = "projects/{project}/subscriptions/{subscription}".format(
         project=project, subscription=subscription,
     )
@@ -3888,8 +4653,8 @@ def test_subscription_path():
 
 def test_parse_subscription_path():
     expected = {
-        "project": "oyster",
-        "subscription": "nudibranch",
+        "project": "winkle",
+        "subscription": "nautilus",
     }
     path = CloudBuildClient.subscription_path(**expected)
 
@@ -3899,8 +4664,8 @@ def test_parse_subscription_path():
 
 
 def test_topic_path():
-    project = "cuttlefish"
-    topic = "mussel"
+    project = "scallop"
+    topic = "abalone"
     expected = "projects/{project}/topics/{topic}".format(project=project, topic=topic,)
     actual = CloudBuildClient.topic_path(project, topic)
     assert expected == actual
@@ -3908,8 +4673,8 @@ def test_topic_path():
 
 def test_parse_topic_path():
     expected = {
-        "project": "winkle",
-        "topic": "nautilus",
+        "project": "squid",
+        "topic": "clam",
     }
     path = CloudBuildClient.topic_path(**expected)
 
@@ -3918,8 +4683,32 @@ def test_parse_topic_path():
     assert expected == actual
 
 
+def test_worker_pool_path():
+    project = "whelk"
+    location = "octopus"
+    worker_pool = "oyster"
+    expected = "projects/{project}/locations/{location}/workerPools/{worker_pool}".format(
+        project=project, location=location, worker_pool=worker_pool,
+    )
+    actual = CloudBuildClient.worker_pool_path(project, location, worker_pool)
+    assert expected == actual
+
+
+def test_parse_worker_pool_path():
+    expected = {
+        "project": "nudibranch",
+        "location": "cuttlefish",
+        "worker_pool": "mussel",
+    }
+    path = CloudBuildClient.worker_pool_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = CloudBuildClient.parse_worker_pool_path(path)
+    assert expected == actual
+
+
 def test_common_billing_account_path():
-    billing_account = "scallop"
+    billing_account = "winkle"
     expected = "billingAccounts/{billing_account}".format(
         billing_account=billing_account,
     )
@@ -3929,7 +4718,7 @@ def test_common_billing_account_path():
 
 def test_parse_common_billing_account_path():
     expected = {
-        "billing_account": "abalone",
+        "billing_account": "nautilus",
     }
     path = CloudBuildClient.common_billing_account_path(**expected)
 
@@ -3939,7 +4728,7 @@ def test_parse_common_billing_account_path():
 
 def test_common_folder_path():
-    folder = "squid"
+    folder = "scallop"
     expected = "folders/{folder}".format(folder=folder,)
     actual = CloudBuildClient.common_folder_path(folder)
     assert expected == actual
@@ -3947,7 +4736,7 @@ def test_common_folder_path():
 
 def test_parse_common_folder_path():
     expected = {
-        "folder": "clam",
+        "folder": "abalone",
     }
     path = CloudBuildClient.common_folder_path(**expected)
 
@@ -3957,7 +4746,7 @@ def test_parse_common_folder_path():
 
 def test_common_organization_path():
-    organization = "whelk"
+    organization = "squid"
     expected = "organizations/{organization}".format(organization=organization,)
     actual = CloudBuildClient.common_organization_path(organization)
     assert expected == actual
@@ -3965,7 +4754,7 @@ def test_common_organization_path():
 
 def test_parse_common_organization_path():
     expected = {
-        "organization": "octopus",
+        "organization": "clam",
     }
     path = CloudBuildClient.common_organization_path(**expected)
 
@@ -3975,7 +4764,7 @@ def test_parse_common_organization_path():
 
 def test_common_project_path():
-    project = "oyster"
+    project = "whelk"
     expected = "projects/{project}".format(project=project,)
     actual = CloudBuildClient.common_project_path(project)
     assert expected == actual
@@ -3983,7 +4772,7 @@ def test_common_project_path():
 
 def test_parse_common_project_path():
     expected = {
-        "project": "nudibranch",
+        "project": "octopus",
     }
     path = CloudBuildClient.common_project_path(**expected)
 
@@ -3993,8 +4782,8 @@ def test_parse_common_project_path():
 
 def test_common_location_path():
-    project = "cuttlefish"
-    location = "mussel"
+    project = "oyster"
+    location = "nudibranch"
     expected = "projects/{project}/locations/{location}".format(
         project=project, location=location,
     )
@@ -4004,8 +4793,8 @@ def test_common_location_path():
 
 def test_parse_common_location_path():
     expected = {
-        "project": "winkle",
-        "location": "nautilus",
+        "project": "cuttlefish",
+        "location": "mussel",
    }
    path = CloudBuildClient.common_location_path(**expected)