diff --git a/docs/bigquery_storage_v1/types.rst b/docs/bigquery_storage_v1/types.rst
index 28b5db72..3f722c57 100644
--- a/docs/bigquery_storage_v1/types.rst
+++ b/docs/bigquery_storage_v1/types.rst
@@ -3,5 +3,4 @@ Types for Google Cloud Bigquery Storage v1 API
 
 .. automodule:: google.cloud.bigquery_storage_v1.types
     :members:
-    :undoc-members:
     :show-inheritance:
diff --git a/docs/bigquery_storage_v1beta2/types.rst b/docs/bigquery_storage_v1beta2/types.rst
index 995806da..c70a8cf3 100644
--- a/docs/bigquery_storage_v1beta2/types.rst
+++ b/docs/bigquery_storage_v1beta2/types.rst
@@ -3,5 +3,4 @@ Types for Google Cloud Bigquery Storage v1beta2 API
 
 .. automodule:: google.cloud.bigquery_storage_v1beta2.types
     :members:
-    :undoc-members:
     :show-inheritance:
diff --git a/google/cloud/bigquery_storage/__init__.py b/google/cloud/bigquery_storage/__init__.py
index af53e82d..311d42a9 100644
--- a/google/cloud/bigquery_storage/__init__.py
+++ b/google/cloud/bigquery_storage/__init__.py
@@ -13,6 +13,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+from google.cloud.bigquery_storage import gapic_version as package_version
+
+__version__ = package_version.__version__
+
 from google.cloud.bigquery_storage_v1 import BigQueryReadClient
 from google.cloud.bigquery_storage_v1.services.big_query_write.client import (
diff --git a/google/cloud/bigquery_storage/gapic_version.py b/google/cloud/bigquery_storage/gapic_version.py
new file mode 100644
index 00000000..35859c3f
--- /dev/null
+++ b/google/cloud/bigquery_storage/gapic_version.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+__version__ = "0.1.0"
diff --git a/google/cloud/bigquery_storage_v1/services/big_query_read/async_client.py b/google/cloud/bigquery_storage_v1/services/big_query_read/async_client.py
index 99188400..6c2f687e 100644
--- a/google/cloud/bigquery_storage_v1/services/big_query_read/async_client.py
+++ b/google/cloud/bigquery_storage_v1/services/big_query_read/async_client.py
@@ -19,6 +19,8 @@
 from typing import (
     Dict,
     Mapping,
+    MutableMapping,
+    MutableSequence,
     Optional,
     AsyncIterable,
     Awaitable,
@@ -173,9 +175,9 @@ def transport(self) -> BigQueryReadTransport:
     def __init__(
         self,
         *,
-        credentials: ga_credentials.Credentials = None,
+        credentials: Optional[ga_credentials.Credentials] = None,
         transport: Union[str, BigQueryReadTransport] = "grpc_asyncio",
-        client_options: ClientOptions = None,
+        client_options: Optional[ClientOptions] = None,
         client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
     ) -> None:
         """Instantiates the big query read client.
@@ -219,13 +221,13 @@ def __init__(
 
     async def create_read_session(
         self,
-        request: Union[storage.CreateReadSessionRequest, dict] = None,
+        request: Optional[Union[storage.CreateReadSessionRequest, dict]] = None,
         *,
-        parent: str = None,
-        read_session: stream.ReadSession = None,
-        max_stream_count: int = None,
+        parent: Optional[str] = None,
+        read_session: Optional[stream.ReadSession] = None,
+        max_stream_count: Optional[int] = None,
         retry: OptionalRetry = gapic_v1.method.DEFAULT,
-        timeout: float = None,
+        timeout: Optional[float] = None,
         metadata: Sequence[Tuple[str, str]] = (),
     ) -> stream.ReadSession:
         r"""Creates a new read session. A read session divides
@@ -279,7 +281,7 @@ async def sample_create_read_session():
                 print(response)
 
         Args:
-            request (Union[google.cloud.bigquery_storage_v1.types.CreateReadSessionRequest, dict]):
+            request (Optional[Union[google.cloud.bigquery_storage_v1.types.CreateReadSessionRequest, dict]]):
                 The request object. Request message for
                 `CreateReadSession`.
             parent (:class:`str`):
@@ -382,12 +384,12 @@ async def sample_create_read_session():
 
     def read_rows(
         self,
-        request: Union[storage.ReadRowsRequest, dict] = None,
+        request: Optional[Union[storage.ReadRowsRequest, dict]] = None,
         *,
-        read_stream: str = None,
-        offset: int = None,
+        read_stream: Optional[str] = None,
+        offset: Optional[int] = None,
         retry: OptionalRetry = gapic_v1.method.DEFAULT,
-        timeout: float = None,
+        timeout: Optional[float] = None,
         metadata: Sequence[Tuple[str, str]] = (),
     ) -> Awaitable[AsyncIterable[storage.ReadRowsResponse]]:
         r"""Reads rows from the stream in the format prescribed
@@ -427,7 +429,7 @@ async def sample_read_rows():
                 print(response)
 
         Args:
-            request (Union[google.cloud.bigquery_storage_v1.types.ReadRowsRequest, dict]):
+            request (Optional[Union[google.cloud.bigquery_storage_v1.types.ReadRowsRequest, dict]]):
                 The request object. Request message for `ReadRows`.
             read_stream (:class:`str`):
                 Required. Stream to read rows from.
@@ -513,10 +515,10 @@ async def sample_read_rows():
 
     async def split_read_stream(
         self,
-        request: Union[storage.SplitReadStreamRequest, dict] = None,
+        request: Optional[Union[storage.SplitReadStreamRequest, dict]] = None,
         *,
         retry: OptionalRetry = gapic_v1.method.DEFAULT,
-        timeout: float = None,
+        timeout: Optional[float] = None,
         metadata: Sequence[Tuple[str, str]] = (),
     ) -> storage.SplitReadStreamResponse:
         r"""Splits a given ``ReadStream`` into two ``ReadStream`` objects.
@@ -560,7 +562,7 @@ async def sample_split_read_stream():
                 print(response)
 
         Args:
-            request (Union[google.cloud.bigquery_storage_v1.types.SplitReadStreamRequest, dict]):
+            request (Optional[Union[google.cloud.bigquery_storage_v1.types.SplitReadStreamRequest, dict]]):
                 The request object. Request message for
                 `SplitReadStream`.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
diff --git a/google/cloud/bigquery_storage_v1/services/big_query_read/client.py b/google/cloud/bigquery_storage_v1/services/big_query_read/client.py
index 20de9fbd..6198dd23 100644
--- a/google/cloud/bigquery_storage_v1/services/big_query_read/client.py
+++ b/google/cloud/bigquery_storage_v1/services/big_query_read/client.py
@@ -16,7 +16,19 @@
 from collections import OrderedDict
 import os
 import re
-from typing import Dict, Mapping, Optional, Iterable, Sequence, Tuple, Type, Union
+from typing import (
+    Dict,
+    Mapping,
+    MutableMapping,
+    MutableSequence,
+    Optional,
+    Iterable,
+    Sequence,
+    Tuple,
+    Type,
+    Union,
+    cast,
+)
 import pkg_resources
 
 from google.api_core import client_options as client_options_lib
@@ -58,7 +70,7 @@ class BigQueryReadClientMeta(type):
 
     def get_transport_class(
         cls,
-        label: str = None,
+        label: Optional[str] = None,
     ) -> Type[BigQueryReadTransport]:
         """Returns an appropriate transport class.
 
@@ -381,8 +393,8 @@ def __init__(
         self,
         *,
         credentials: Optional[ga_credentials.Credentials] = None,
-        transport: Union[str, BigQueryReadTransport, None] = None,
-        client_options: Optional[client_options_lib.ClientOptions] = None,
+        transport: Optional[Union[str, BigQueryReadTransport]] = None,
+        client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
         client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
     ) -> None:
         """Instantiates the big query read client.
@@ -396,7 +408,7 @@ def __init__(
             transport (Union[str, BigQueryReadTransport]): The
                 transport to use. If set to None, a transport is chosen
                 automatically.
-            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+            client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the
                 client. It won't take effect if a ``transport`` instance is provided.
                 (1) The ``api_endpoint`` property can be used to override the
                 default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
@@ -426,6 +438,7 @@ def __init__(
             client_options = client_options_lib.from_dict(client_options)
         if client_options is None:
             client_options = client_options_lib.ClientOptions()
+        client_options = cast(client_options_lib.ClientOptions, client_options)
 
         api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
             client_options
@@ -478,13 +491,13 @@ def __init__(
 
     def create_read_session(
         self,
-        request: Union[storage.CreateReadSessionRequest, dict] = None,
+        request: Optional[Union[storage.CreateReadSessionRequest, dict]] = None,
         *,
-        parent: str = None,
-        read_session: stream.ReadSession = None,
-        max_stream_count: int = None,
+        parent: Optional[str] = None,
+        read_session: Optional[stream.ReadSession] = None,
+        max_stream_count: Optional[int] = None,
         retry: OptionalRetry = gapic_v1.method.DEFAULT,
-        timeout: float = None,
+        timeout: Optional[float] = None,
         metadata: Sequence[Tuple[str, str]] = (),
     ) -> stream.ReadSession:
         r"""Creates a new read session. A read session divides
@@ -631,12 +644,12 @@ def sample_create_read_session():
 
     def read_rows(
         self,
-        request: Union[storage.ReadRowsRequest, dict] = None,
+        request: Optional[Union[storage.ReadRowsRequest, dict]] = None,
         *,
-        read_stream: str = None,
-        offset: int = None,
+        read_stream: Optional[str] = None,
+        offset: Optional[int] = None,
         retry: OptionalRetry = gapic_v1.method.DEFAULT,
-        timeout: float = None,
+        timeout: Optional[float] = None,
         metadata: Sequence[Tuple[str, str]] = (),
     ) -> Iterable[storage.ReadRowsResponse]:
         r"""Reads rows from the stream in the format prescribed
@@ -753,10 +766,10 @@ def sample_read_rows():
 
     def split_read_stream(
         self,
-        request: Union[storage.SplitReadStreamRequest, dict] = None,
+        request: Optional[Union[storage.SplitReadStreamRequest, dict]] = None,
         *,
         retry: OptionalRetry = gapic_v1.method.DEFAULT,
-        timeout: float = None,
+        timeout: Optional[float] = None,
         metadata: Sequence[Tuple[str, str]] = (),
     ) -> storage.SplitReadStreamResponse:
         r"""Splits a given ``ReadStream`` into two ``ReadStream`` objects.
diff --git a/google/cloud/bigquery_storage_v1/services/big_query_read/transports/base.py b/google/cloud/bigquery_storage_v1/services/big_query_read/transports/base.py
index fa95cabe..27917703 100644
--- a/google/cloud/bigquery_storage_v1/services/big_query_read/transports/base.py
+++ b/google/cloud/bigquery_storage_v1/services/big_query_read/transports/base.py
@@ -52,7 +52,7 @@ def __init__(
         self,
         *,
         host: str = DEFAULT_HOST,
-        credentials: ga_credentials.Credentials = None,
+        credentials: Optional[ga_credentials.Credentials] = None,
         credentials_file: Optional[str] = None,
         scopes: Optional[Sequence[str]] = None,
         quota_project_id: Optional[str] = None,
diff --git a/google/cloud/bigquery_storage_v1/services/big_query_read/transports/grpc.py b/google/cloud/bigquery_storage_v1/services/big_query_read/transports/grpc.py
index 3717ba1e..eacd1379 100644
--- a/google/cloud/bigquery_storage_v1/services/big_query_read/transports/grpc.py
+++ b/google/cloud/bigquery_storage_v1/services/big_query_read/transports/grpc.py
@@ -49,14 +49,14 @@ def __init__(
         self,
         *,
         host: str = "bigquerystorage.googleapis.com",
-        credentials: ga_credentials.Credentials = None,
-        credentials_file: str = None,
-        scopes: Sequence[str] = None,
-        channel: grpc.Channel = None,
-        api_mtls_endpoint: str = None,
-        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
-        ssl_channel_credentials: grpc.ChannelCredentials = None,
-        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+        credentials: Optional[ga_credentials.Credentials] = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        channel: Optional[grpc.Channel] = None,
+        api_mtls_endpoint: Optional[str] = None,
+        client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+        ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
+        client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
         quota_project_id: Optional[str] = None,
         client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
         always_use_jwt_access: Optional[bool] = False,
@@ -183,8 +183,8 @@ def __init__(
     def create_channel(
         cls,
         host: str = "bigquerystorage.googleapis.com",
-        credentials: ga_credentials.Credentials = None,
-        credentials_file: str = None,
+        credentials: Optional[ga_credentials.Credentials] = None,
+        credentials_file: Optional[str] = None,
         scopes: Optional[Sequence[str]] = None,
         quota_project_id: Optional[str] = None,
         **kwargs,
diff --git a/google/cloud/bigquery_storage_v1/services/big_query_read/transports/grpc_asyncio.py b/google/cloud/bigquery_storage_v1/services/big_query_read/transports/grpc_asyncio.py
index b47e745d..b80db42b 100644
--- a/google/cloud/bigquery_storage_v1/services/big_query_read/transports/grpc_asyncio.py
+++ b/google/cloud/bigquery_storage_v1/services/big_query_read/transports/grpc_asyncio.py
@@ -51,7 +51,7 @@ class BigQueryReadGrpcAsyncIOTransport(BigQueryReadTransport):
     def create_channel(
         cls,
         host: str = "bigquerystorage.googleapis.com",
-        credentials: ga_credentials.Credentials = None,
+        credentials: Optional[ga_credentials.Credentials] = None,
         credentials_file: Optional[str] = None,
         scopes: Optional[Sequence[str]] = None,
         quota_project_id: Optional[str] = None,
@@ -94,15 +94,15 @@ def __init__(
         self,
         *,
         host: str = "bigquerystorage.googleapis.com",
-        credentials: ga_credentials.Credentials = None,
+        credentials: Optional[ga_credentials.Credentials] = None,
         credentials_file: Optional[str] = None,
         scopes: Optional[Sequence[str]] = None,
-        channel: aio.Channel = None,
-        api_mtls_endpoint: str = None,
-        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
-        ssl_channel_credentials: grpc.ChannelCredentials = None,
-        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
-        quota_project_id=None,
+        channel: Optional[aio.Channel] = None,
+        api_mtls_endpoint: Optional[str] = None,
+        client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+        ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
+        client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+        quota_project_id: Optional[str] = None,
         client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
         always_use_jwt_access: Optional[bool] = False,
         api_audience: Optional[str] = None,
diff --git a/google/cloud/bigquery_storage_v1/services/big_query_write/async_client.py b/google/cloud/bigquery_storage_v1/services/big_query_write/async_client.py
index cf87a316..1839b0b6 100644
--- a/google/cloud/bigquery_storage_v1/services/big_query_write/async_client.py
+++ b/google/cloud/bigquery_storage_v1/services/big_query_write/async_client.py
@@ -19,6 +19,8 @@
 from typing import (
     Dict,
     Mapping,
+    MutableMapping,
+    MutableSequence,
     Optional,
     AsyncIterable,
     Awaitable,
@@ -178,9 +180,9 @@ def transport(self) -> BigQueryWriteTransport:
     def __init__(
         self,
         *,
-        credentials: ga_credentials.Credentials = None,
+        credentials: Optional[ga_credentials.Credentials] = None,
         transport: Union[str, BigQueryWriteTransport] = "grpc_asyncio",
-        client_options: ClientOptions = None,
+        client_options: Optional[ClientOptions] = None,
         client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
     ) -> None:
         """Instantiates the big query write client.
@@ -224,12 +226,12 @@ def __init__(
 
     async def create_write_stream(
         self,
-        request: Union[storage.CreateWriteStreamRequest, dict] = None,
+        request: Optional[Union[storage.CreateWriteStreamRequest, dict]] = None,
         *,
-        parent: str = None,
-        write_stream: stream.WriteStream = None,
+        parent: Optional[str] = None,
+        write_stream: Optional[stream.WriteStream] = None,
         retry: OptionalRetry = gapic_v1.method.DEFAULT,
-        timeout: float = None,
+        timeout: Optional[float] = None,
         metadata: Sequence[Tuple[str, str]] = (),
     ) -> stream.WriteStream:
         r"""Creates a write stream to the given table. Additionally, every
@@ -267,7 +269,7 @@ async def sample_create_write_stream():
                 print(response)
 
         Args:
-            request (Union[google.cloud.bigquery_storage_v1.types.CreateWriteStreamRequest, dict]):
+            request (Optional[Union[google.cloud.bigquery_storage_v1.types.CreateWriteStreamRequest, dict]]):
                 The request object. Request message for
                 `CreateWriteStream`.
             parent (:class:`str`):
@@ -353,10 +355,10 @@ async def sample_create_write_stream():
 
     def append_rows(
         self,
-        requests: AsyncIterator[storage.AppendRowsRequest] = None,
+        requests: Optional[AsyncIterator[storage.AppendRowsRequest]] = None,
         *,
         retry: OptionalRetry = gapic_v1.method.DEFAULT,
-        timeout: float = None,
+        timeout: Optional[float] = None,
         metadata: Sequence[Tuple[str, str]] = (),
     ) -> Awaitable[AsyncIterable[storage.AppendRowsResponse]]:
         r"""Appends data to the given stream.
@@ -394,13 +396,6 @@ def append_rows(
         rpc), and the stream is explicitly committed via the
         ``BatchCommitWriteStreams`` rpc.
 
-        Note: For users coding against the gRPC api directly, it may be
-        necessary to supply the x-goog-request-params system parameter
-        with ``write_stream=``.
-
-        More information about system parameters:
-        https://cloud.google.com/apis/docs/system-parameters
-
         .. code-block:: python
 
             # This snippet has been automatically generated and should be regarded as a
@@ -493,11 +488,11 @@ def request_generator():
 
     async def get_write_stream(
         self,
-        request: Union[storage.GetWriteStreamRequest, dict] = None,
+        request: Optional[Union[storage.GetWriteStreamRequest, dict]] = None,
         *,
-        name: str = None,
+        name: Optional[str] = None,
         retry: OptionalRetry = gapic_v1.method.DEFAULT,
-        timeout: float = None,
+        timeout: Optional[float] = None,
         metadata: Sequence[Tuple[str, str]] = (),
     ) -> stream.WriteStream:
         r"""Gets information about a write stream.
@@ -529,7 +524,7 @@ async def sample_get_write_stream():
                 print(response)
 
         Args:
-            request (Union[google.cloud.bigquery_storage_v1.types.GetWriteStreamRequest, dict]):
+            request (Optional[Union[google.cloud.bigquery_storage_v1.types.GetWriteStreamRequest, dict]]):
                 The request object. Request message for
                 `GetWriteStreamRequest`.
             name (:class:`str`):
@@ -606,11 +601,11 @@ async def sample_get_write_stream():
 
     async def finalize_write_stream(
        self,
-        request: Union[storage.FinalizeWriteStreamRequest, dict] = None,
+        request: Optional[Union[storage.FinalizeWriteStreamRequest, dict]] = None,
         *,
-        name: str = None,
+        name: Optional[str] = None,
         retry: OptionalRetry = gapic_v1.method.DEFAULT,
-        timeout: float = None,
+        timeout: Optional[float] = None,
         metadata: Sequence[Tuple[str, str]] = (),
     ) -> storage.FinalizeWriteStreamResponse:
         r"""Finalize a write stream so that no new data can be appended to
@@ -643,7 +638,7 @@ async def sample_finalize_write_stream():
                 print(response)
 
         Args:
-            request (Union[google.cloud.bigquery_storage_v1.types.FinalizeWriteStreamRequest, dict]):
+            request (Optional[Union[google.cloud.bigquery_storage_v1.types.FinalizeWriteStreamRequest, dict]]):
                 The request object. Request message for
                 invoking `FinalizeWriteStream`.
            name (:class:`str`):
@@ -717,11 +712,11 @@ async def sample_finalize_write_stream():
 
     async def batch_commit_write_streams(
         self,
-        request: Union[storage.BatchCommitWriteStreamsRequest, dict] = None,
+        request: Optional[Union[storage.BatchCommitWriteStreamsRequest, dict]] = None,
         *,
-        parent: str = None,
+        parent: Optional[str] = None,
         retry: OptionalRetry = gapic_v1.method.DEFAULT,
-        timeout: float = None,
+        timeout: Optional[float] = None,
         metadata: Sequence[Tuple[str, str]] = (),
     ) -> storage.BatchCommitWriteStreamsResponse:
         r"""Atomically commits a group of ``PENDING`` streams that belong to
@@ -759,7 +754,7 @@ async def sample_batch_commit_write_streams():
                 print(response)
 
         Args:
-            request (Union[google.cloud.bigquery_storage_v1.types.BatchCommitWriteStreamsRequest, dict]):
+            request (Optional[Union[google.cloud.bigquery_storage_v1.types.BatchCommitWriteStreamsRequest, dict]]):
                 The request object. Request message for
                 `BatchCommitWriteStreams`.
             parent (:class:`str`):
@@ -834,11 +829,11 @@ async def sample_batch_commit_write_streams():
 
     async def flush_rows(
         self,
-        request: Union[storage.FlushRowsRequest, dict] = None,
+        request: Optional[Union[storage.FlushRowsRequest, dict]] = None,
         *,
-        write_stream: str = None,
+        write_stream: Optional[str] = None,
         retry: OptionalRetry = gapic_v1.method.DEFAULT,
-        timeout: float = None,
+        timeout: Optional[float] = None,
         metadata: Sequence[Tuple[str, str]] = (),
     ) -> storage.FlushRowsResponse:
         r"""Flushes rows to a BUFFERED stream.
@@ -879,7 +874,7 @@ async def sample_flush_rows():
                 print(response)
 
         Args:
-            request (Union[google.cloud.bigquery_storage_v1.types.FlushRowsRequest, dict]):
+            request (Optional[Union[google.cloud.bigquery_storage_v1.types.FlushRowsRequest, dict]]):
                 The request object. Request message for `FlushRows`.
             write_stream (:class:`str`):
                 Required. The stream that is the
diff --git a/google/cloud/bigquery_storage_v1/services/big_query_write/client.py b/google/cloud/bigquery_storage_v1/services/big_query_write/client.py
index ad13d913..5d90b216 100644
--- a/google/cloud/bigquery_storage_v1/services/big_query_write/client.py
+++ b/google/cloud/bigquery_storage_v1/services/big_query_write/client.py
@@ -19,6 +19,8 @@
 from typing import (
     Dict,
     Mapping,
+    MutableMapping,
+    MutableSequence,
     Optional,
     Iterable,
     Iterator,
@@ -26,6 +28,7 @@
     Tuple,
     Type,
     Union,
+    cast,
 )
 import pkg_resources
 
@@ -68,7 +71,7 @@ class BigQueryWriteClientMeta(type):
 
     def get_transport_class(
         cls,
-        label: str = None,
+        label: Optional[str] = None,
     ) -> Type[BigQueryWriteTransport]:
         """Returns an appropriate transport class.
 
@@ -371,8 +374,8 @@ def __init__(
         self,
         *,
         credentials: Optional[ga_credentials.Credentials] = None,
-        transport: Union[str, BigQueryWriteTransport, None] = None,
-        client_options: Optional[client_options_lib.ClientOptions] = None,
+        transport: Optional[Union[str, BigQueryWriteTransport]] = None,
+        client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
         client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
     ) -> None:
         """Instantiates the big query write client.
@@ -386,7 +389,7 @@ def __init__(
             transport (Union[str, BigQueryWriteTransport]): The
                 transport to use. If set to None, a transport is chosen
                 automatically.
-            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+            client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the
                 client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
@@ -416,6 +419,7 @@ def __init__(
             client_options = client_options_lib.from_dict(client_options)
         if client_options is None:
             client_options = client_options_lib.ClientOptions()
+        client_options = cast(client_options_lib.ClientOptions, client_options)
 
         api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
             client_options
@@ -468,12 +472,12 @@ def __init__(
 
     def create_write_stream(
         self,
-        request: Union[storage.CreateWriteStreamRequest, dict] = None,
+        request: Optional[Union[storage.CreateWriteStreamRequest, dict]] = None,
         *,
-        parent: str = None,
-        write_stream: stream.WriteStream = None,
+        parent: Optional[str] = None,
+        write_stream: Optional[stream.WriteStream] = None,
         retry: OptionalRetry = gapic_v1.method.DEFAULT,
-        timeout: float = None,
+        timeout: Optional[float] = None,
         metadata: Sequence[Tuple[str, str]] = (),
     ) -> stream.WriteStream:
         r"""Creates a write stream to the given table. Additionally, every
@@ -586,10 +590,10 @@ def sample_create_write_stream():
 
     def append_rows(
         self,
-        requests: Iterator[storage.AppendRowsRequest] = None,
+        requests: Optional[Iterator[storage.AppendRowsRequest]] = None,
         *,
         retry: OptionalRetry = gapic_v1.method.DEFAULT,
-        timeout: float = None,
+        timeout: Optional[float] = None,
         metadata: Sequence[Tuple[str, str]] = (),
     ) -> Iterable[storage.AppendRowsResponse]:
         r"""Appends data to the given stream.
@@ -627,13 +631,6 @@ def append_rows(
         rpc), and the stream is explicitly committed via the
         ``BatchCommitWriteStreams`` rpc.
 
-        Note: For users coding against the gRPC api directly, it may be
-        necessary to supply the x-goog-request-params system parameter
-        with ``write_stream=``.
-
-        More information about system parameters:
-        https://cloud.google.com/apis/docs/system-parameters
-
         .. code-block:: python
 
             # This snippet has been automatically generated and should be regarded as a
@@ -713,11 +710,11 @@ def request_generator():
 
     def get_write_stream(
         self,
-        request: Union[storage.GetWriteStreamRequest, dict] = None,
+        request: Optional[Union[storage.GetWriteStreamRequest, dict]] = None,
         *,
-        name: str = None,
+        name: Optional[str] = None,
         retry: OptionalRetry = gapic_v1.method.DEFAULT,
-        timeout: float = None,
+        timeout: Optional[float] = None,
         metadata: Sequence[Tuple[str, str]] = (),
     ) -> stream.WriteStream:
         r"""Gets information about a write stream.
@@ -816,11 +813,11 @@ def sample_get_write_stream():
 
     def finalize_write_stream(
         self,
-        request: Union[storage.FinalizeWriteStreamRequest, dict] = None,
+        request: Optional[Union[storage.FinalizeWriteStreamRequest, dict]] = None,
         *,
-        name: str = None,
+        name: Optional[str] = None,
         retry: OptionalRetry = gapic_v1.method.DEFAULT,
-        timeout: float = None,
+        timeout: Optional[float] = None,
         metadata: Sequence[Tuple[str, str]] = (),
     ) -> storage.FinalizeWriteStreamResponse:
         r"""Finalize a write stream so that no new data can be appended to
@@ -917,11 +914,11 @@ def sample_finalize_write_stream():
 
     def batch_commit_write_streams(
         self,
-        request: Union[storage.BatchCommitWriteStreamsRequest, dict] = None,
+        request: Optional[Union[storage.BatchCommitWriteStreamsRequest, dict]] = None,
         *,
-        parent: str = None,
+        parent: Optional[str] = None,
         retry: OptionalRetry = gapic_v1.method.DEFAULT,
-        timeout: float = None,
+        timeout: Optional[float] = None,
         metadata: Sequence[Tuple[str, str]] = (),
     ) -> storage.BatchCommitWriteStreamsResponse:
         r"""Atomically commits a group of ``PENDING`` streams that belong to
@@ -1026,11 +1023,11 @@ def sample_batch_commit_write_streams():
 
     def flush_rows(
         self,
-        request: Union[storage.FlushRowsRequest, dict] = None,
+        request: Optional[Union[storage.FlushRowsRequest, dict]] = None,
         *,
-        write_stream: str = None,
+        write_stream: Optional[str] = None,
         retry: OptionalRetry = gapic_v1.method.DEFAULT,
-        timeout: float = None,
+        timeout: Optional[float] = None,
         metadata: Sequence[Tuple[str, str]] = (),
     ) -> storage.FlushRowsResponse:
         r"""Flushes rows to a BUFFERED stream.
diff --git a/google/cloud/bigquery_storage_v1/services/big_query_write/transports/base.py b/google/cloud/bigquery_storage_v1/services/big_query_write/transports/base.py
index 6eeb80d5..7aa900dd 100644
--- a/google/cloud/bigquery_storage_v1/services/big_query_write/transports/base.py
+++ b/google/cloud/bigquery_storage_v1/services/big_query_write/transports/base.py
@@ -53,7 +53,7 @@ def __init__(
         self,
         *,
         host: str = DEFAULT_HOST,
-        credentials: ga_credentials.Credentials = None,
+        credentials: Optional[ga_credentials.Credentials] = None,
         credentials_file: Optional[str] = None,
         scopes: Optional[Sequence[str]] = None,
         quota_project_id: Optional[str] = None,
diff --git a/google/cloud/bigquery_storage_v1/services/big_query_write/transports/grpc.py b/google/cloud/bigquery_storage_v1/services/big_query_write/transports/grpc.py
index 43337342..535c6e3b 100644
--- a/google/cloud/bigquery_storage_v1/services/big_query_write/transports/grpc.py
+++ b/google/cloud/bigquery_storage_v1/services/big_query_write/transports/grpc.py
@@ -51,14 +51,14 @@ def __init__(
         self,
         *,
         host: str = "bigquerystorage.googleapis.com",
-        credentials: ga_credentials.Credentials = None,
-        credentials_file: str = None,
-        scopes: Sequence[str] = None,
-        channel: grpc.Channel = None,
-        api_mtls_endpoint: str = None,
-        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
-        ssl_channel_credentials: grpc.ChannelCredentials = None,
-        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+        credentials: Optional[ga_credentials.Credentials] = None,
+        credentials_file: Optional[str] = None,
+        scopes: Optional[Sequence[str]] = None,
+        channel: Optional[grpc.Channel] = None,
+        api_mtls_endpoint: Optional[str] = None,
+        client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+        ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
+        client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
         quota_project_id: Optional[str] = None,
         client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
         always_use_jwt_access: Optional[bool] = False,
@@ -185,8 +185,8 @@ def __init__(
     def create_channel(
         cls,
         host: str = "bigquerystorage.googleapis.com",
-        credentials: ga_credentials.Credentials = None,
-        credentials_file: str = None,
+        credentials: Optional[ga_credentials.Credentials] = None,
+        credentials_file: Optional[str] = None,
         scopes: Optional[Sequence[str]] = None,
         quota_project_id: Optional[str] = None,
         **kwargs,
@@ -306,13 +306,6 @@ def append_rows(
         rpc), and the stream is explicitly committed via the
         ``BatchCommitWriteStreams`` rpc.
 
-        Note: For users coding against the gRPC api directly, it may be
-        necessary to supply the x-goog-request-params system parameter
-        with ``write_stream=``.
-
-        More information about system parameters:
-        https://cloud.google.com/apis/docs/system-parameters
-
         Returns:
             Callable[[~.AppendRowsRequest],
                     ~.AppendRowsResponse]:
diff --git a/google/cloud/bigquery_storage_v1/services/big_query_write/transports/grpc_asyncio.py b/google/cloud/bigquery_storage_v1/services/big_query_write/transports/grpc_asyncio.py
index c3a47d81..e01d718c 100644
--- a/google/cloud/bigquery_storage_v1/services/big_query_write/transports/grpc_asyncio.py
+++ b/google/cloud/bigquery_storage_v1/services/big_query_write/transports/grpc_asyncio.py
@@ -53,7 +53,7 @@ class BigQueryWriteGrpcAsyncIOTransport(BigQueryWriteTransport):
     def create_channel(
         cls,
         host: str = "bigquerystorage.googleapis.com",
-        credentials: ga_credentials.Credentials = None,
+        credentials: Optional[ga_credentials.Credentials] = None,
         credentials_file: Optional[str] = None,
         scopes: Optional[Sequence[str]] = None,
         quota_project_id: Optional[str] = None,
@@ -96,15 +96,15 @@ def __init__(
         self,
         *,
         host: str = "bigquerystorage.googleapis.com",
-        credentials: ga_credentials.Credentials = None,
+        credentials: Optional[ga_credentials.Credentials] = None,
         credentials_file: Optional[str] = None,
         scopes: Optional[Sequence[str]] = None,
-        channel: aio.Channel = None,
-        api_mtls_endpoint: str = None,
-        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
-        ssl_channel_credentials: grpc.ChannelCredentials = None,
-        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
-        quota_project_id=None,
+        channel: Optional[aio.Channel] = None,
+        api_mtls_endpoint: Optional[str] = None,
+        client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+        ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
+        client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+        quota_project_id: Optional[str] = None,
         client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
         always_use_jwt_access: Optional[bool] = False,
         api_audience: Optional[str] = None,
@@ -309,13 +309,6 @@ def append_rows(
         rpc), and the stream is explicitly committed via the
         ``BatchCommitWriteStreams`` rpc.
 
-        Note: For users coding against the gRPC api directly, it may be
-        necessary to supply the x-goog-request-params system parameter
-        with ``write_stream=``.
-
-        More information about system parameters:
-        https://cloud.google.com/apis/docs/system-parameters
-
         Returns:
             Callable[[~.AppendRowsRequest],
                     Awaitable[~.AppendRowsResponse]]:
diff --git a/google/cloud/bigquery_storage_v1/types/arrow.py b/google/cloud/bigquery_storage_v1/types/arrow.py
index 80c37cad..4376b037 100644
--- a/google/cloud/bigquery_storage_v1/types/arrow.py
+++ b/google/cloud/bigquery_storage_v1/types/arrow.py
@@ -13,6 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+from typing import MutableMapping, MutableSequence
+
 import proto  # type: ignore
 
 
@@ -38,7 +40,7 @@ class ArrowSchema(proto.Message):
             IPC serialized Arrow schema.
     """
 
-    serialized_schema = proto.Field(
+    serialized_schema: bytes = proto.Field(
         proto.BYTES,
         number=1,
     )
@@ -56,11 +58,11 @@ class ArrowRecordBatch(proto.Message):
             format-independent ReadRowsResponse.row_count instead.
     """
 
-    serialized_record_batch = proto.Field(
+    serialized_record_batch: bytes = proto.Field(
         proto.BYTES,
         number=1,
     )
-    row_count = proto.Field(
+    row_count: int = proto.Field(
         proto.INT64,
         number=2,
     )
@@ -81,7 +83,7 @@ class CompressionCodec(proto.Enum):
         LZ4_FRAME = 1
         ZSTD = 2
 
-    buffer_compression = proto.Field(
+    buffer_compression: CompressionCodec = proto.Field(
         proto.ENUM,
         number=2,
         enum=CompressionCodec,
diff --git a/google/cloud/bigquery_storage_v1/types/avro.py b/google/cloud/bigquery_storage_v1/types/avro.py
index 59ce1133..75a63f37 100644
--- a/google/cloud/bigquery_storage_v1/types/avro.py
+++ b/google/cloud/bigquery_storage_v1/types/avro.py
@@ -13,6 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+from typing import MutableMapping, MutableSequence
+
 import proto  # type: ignore
 
 
@@ -35,7 +37,7 @@ class AvroSchema(proto.Message):
             https://avro.apache.org/docs/1.8.1/spec.html.
     """
 
-    schema = proto.Field(
+    schema: str = proto.Field(
         proto.STRING,
         number=1,
     )
@@ -53,11 +55,11 @@ class AvroRows(proto.Message):
             instead.
     """
 
-    serialized_binary_rows = proto.Field(
+    serialized_binary_rows: bytes = proto.Field(
         proto.BYTES,
         number=1,
     )
-    row_count = proto.Field(
+    row_count: int = proto.Field(
         proto.INT64,
         number=2,
     )
@@ -81,7 +83,7 @@ class AvroSerializationOptions(proto.Message):
             with the original column name.
     """
 
-    enable_display_name_attribute = proto.Field(
+    enable_display_name_attribute: bool = proto.Field(
         proto.BOOL,
         number=1,
     )
diff --git a/google/cloud/bigquery_storage_v1/types/protobuf.py b/google/cloud/bigquery_storage_v1/types/protobuf.py
index 949907b7..2f218225 100644
--- a/google/cloud/bigquery_storage_v1/types/protobuf.py
+++ b/google/cloud/bigquery_storage_v1/types/protobuf.py
@@ -13,6 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+from typing import MutableMapping, MutableSequence
+
 import proto  # type: ignore
 
 from google.protobuf import descriptor_pb2  # type: ignore
@@ -46,7 +48,7 @@ class ProtoSchema(proto.Message):
             https://cloud.google.com/bigquery/docs/write-api#data_type_conversions
     """
 
-    proto_descriptor = proto.Field(
+    proto_descriptor: descriptor_pb2.DescriptorProto = proto.Field(
         proto.MESSAGE,
         number=1,
         message=descriptor_pb2.DescriptorProto,
@@ -57,7 +59,7 @@ class ProtoRows(proto.Message):
     r"""
 
     Attributes:
-        serialized_rows (Sequence[bytes]):
+        serialized_rows (MutableSequence[bytes]):
             A sequence of rows serialized as a Protocol
             Buffer.
 
            See
@@ -66,7 +68,7 @@
             field.
""" - serialized_rows = proto.RepeatedField( + serialized_rows: MutableSequence[bytes] = proto.RepeatedField( proto.BYTES, number=1, ) diff --git a/google/cloud/bigquery_storage_v1/types/storage.py b/google/cloud/bigquery_storage_v1/types/storage.py index faeaf72b..4f4a871b 100644 --- a/google/cloud/bigquery_storage_v1/types/storage.py +++ b/google/cloud/bigquery_storage_v1/types/storage.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from typing import MutableMapping, MutableSequence + import proto # type: ignore from google.cloud.bigquery_storage_v1.types import arrow @@ -87,20 +89,20 @@ class CreateReadSessionRequest(proto.Message): might provide less. """ - parent = proto.Field( + parent: str = proto.Field( proto.STRING, number=1, ) - read_session = proto.Field( + read_session: stream.ReadSession = proto.Field( proto.MESSAGE, number=2, message=stream.ReadSession, ) - max_stream_count = proto.Field( + max_stream_count: int = proto.Field( proto.INT32, number=3, ) - preferred_min_stream_count = proto.Field( + preferred_min_stream_count: int = proto.Field( proto.INT32, number=4, ) @@ -119,11 +121,11 @@ class ReadRowsRequest(proto.Message): reading from offset zero. """ - read_stream = proto.Field( + read_stream: str = proto.Field( proto.STRING, number=1, ) - offset = proto.Field( + offset: int = proto.Field( proto.INT64, number=2, ) @@ -139,7 +141,7 @@ class ThrottleState(proto.Message): throttled. """ - throttle_percent = proto.Field( + throttle_percent: int = proto.Field( proto.INT32, number=1, ) @@ -176,16 +178,16 @@ class Progress(proto.Message): includes the rows in the current response. """ - at_response_start = proto.Field( + at_response_start: float = proto.Field( proto.DOUBLE, number=1, ) - at_response_end = proto.Field( + at_response_end: float = proto.Field( proto.DOUBLE, number=2, ) - progress = proto.Field( + progress: Progress = proto.Field( proto.MESSAGE, number=2, message=Progress, @@ -231,39 +233,39 @@ class ReadRowsResponse(proto.Message): This field is a member of `oneof`_ ``schema``. """ - avro_rows = proto.Field( + avro_rows: avro.AvroRows = proto.Field( proto.MESSAGE, number=3, oneof="rows", message=avro.AvroRows, ) - arrow_record_batch = proto.Field( + arrow_record_batch: arrow.ArrowRecordBatch = proto.Field( proto.MESSAGE, number=4, oneof="rows", message=arrow.ArrowRecordBatch, ) - row_count = proto.Field( + row_count: int = proto.Field( proto.INT64, number=6, ) - stats = proto.Field( + stats: "StreamStats" = proto.Field( proto.MESSAGE, number=2, message="StreamStats", ) - throttle_state = proto.Field( + throttle_state: "ThrottleState" = proto.Field( proto.MESSAGE, number=5, message="ThrottleState", ) - avro_schema = proto.Field( + avro_schema: avro.AvroSchema = proto.Field( proto.MESSAGE, number=7, oneof="schema", message=avro.AvroSchema, ) - arrow_schema = proto.Field( + arrow_schema: arrow.ArrowSchema = proto.Field( proto.MESSAGE, number=8, oneof="schema", @@ -292,11 +294,11 @@ class SplitReadStreamRequest(proto.Message): side. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - fraction = proto.Field( + fraction: float = proto.Field( proto.DOUBLE, number=2, ) @@ -316,12 +318,12 @@ class SplitReadStreamResponse(proto.Message): original stream can no longer be split. 
""" - primary_stream = proto.Field( + primary_stream: stream.ReadStream = proto.Field( proto.MESSAGE, number=1, message=stream.ReadStream, ) - remainder_stream = proto.Field( + remainder_stream: stream.ReadStream = proto.Field( proto.MESSAGE, number=2, message=stream.ReadStream, @@ -340,11 +342,11 @@ class CreateWriteStreamRequest(proto.Message): Required. Stream to be created. """ - parent = proto.Field( + parent: str = proto.Field( proto.STRING, number=1, ) - write_stream = proto.Field( + write_stream: stream.WriteStream = proto.Field( proto.MESSAGE, number=2, message=stream.WriteStream, @@ -394,8 +396,37 @@ class AppendRowsRequest(proto.Message): trace_id (str): Id set by client to annotate its identity. Only initial request setting is respected. + missing_value_interpretations (MutableMapping[str, google.cloud.bigquery_storage_v1.types.AppendRowsRequest.MissingValueInterpretation]): + A map to indicate how to interpret missing value for some + fields. Missing values are fields present in user schema but + missing in rows. The key is the field name. The value is the + interpretation of missing values for the field. + + For example, a map {'foo': NULL_VALUE, 'bar': DEFAULT_VALUE} + means all missing values in field foo are interpreted as + NULL, all missing values in field bar are interpreted as the + default value of field bar in table schema. + + If a field is not in this map and has missing values, the + missing values in this field are interpreted as NULL. + + This field only applies to the current request, it won't + affect other requests on the connection. + + Currently, field name can only be top-level column name, + can't be a struct field path like 'foo.bar'. """ + class MissingValueInterpretation(proto.Enum): + r"""An enum to indicate how to interpret missing values. Missing + values are fields present in user schema but missing in rows. A + missing value can represent a NULL or a column default value + defined in BigQuery table schema. + """ + MISSING_VALUE_INTERPRETATION_UNSPECIFIED = 0 + NULL_VALUE = 1 + DEFAULT_VALUE = 2 + class ProtoData(proto.Message): r"""ProtoData contains the data rows and schema when constructing append requests. @@ -415,36 +446,44 @@ class ProtoData(proto.Message): to how default values are encoded. """ - writer_schema = proto.Field( + writer_schema: protobuf.ProtoSchema = proto.Field( proto.MESSAGE, number=1, message=protobuf.ProtoSchema, ) - rows = proto.Field( + rows: protobuf.ProtoRows = proto.Field( proto.MESSAGE, number=2, message=protobuf.ProtoRows, ) - write_stream = proto.Field( + write_stream: str = proto.Field( proto.STRING, number=1, ) - offset = proto.Field( + offset: wrappers_pb2.Int64Value = proto.Field( proto.MESSAGE, number=2, message=wrappers_pb2.Int64Value, ) - proto_rows = proto.Field( + proto_rows: ProtoData = proto.Field( proto.MESSAGE, number=4, oneof="rows", message=ProtoData, ) - trace_id = proto.Field( + trace_id: str = proto.Field( proto.STRING, number=6, ) + missing_value_interpretations: MutableMapping[ + str, MissingValueInterpretation + ] = proto.MapField( + proto.STRING, + proto.ENUM, + number=7, + enum=MissingValueInterpretation, + ) class AppendRowsResponse(proto.Message): @@ -493,7 +532,7 @@ class AppendRowsResponse(proto.Message): to user so that user can use it to input new type of message. It will be empty when no schema updates have occurred. 
-            row_errors (Sequence[google.cloud.bigquery_storage_v1.types.RowError]):
+            row_errors (MutableSequence[google.cloud.bigquery_storage_v1.types.RowError]):
                 If a request failed due to corrupted rows, no
                 rows in the batch will be appended. The API
                 will return row level error info, so that the caller
@@ -513,35 +552,35 @@ class AppendResult(proto.Message):
                 appending using default streams.
         """
 
-        offset = proto.Field(
+        offset: wrappers_pb2.Int64Value = proto.Field(
            proto.MESSAGE,
            number=1,
            message=wrappers_pb2.Int64Value,
        )
 
-    append_result = proto.Field(
+    append_result: AppendResult = proto.Field(
         proto.MESSAGE,
         number=1,
         oneof="response",
         message=AppendResult,
     )
-    error = proto.Field(
+    error: status_pb2.Status = proto.Field(
         proto.MESSAGE,
         number=2,
         oneof="response",
         message=status_pb2.Status,
     )
-    updated_schema = proto.Field(
+    updated_schema: table.TableSchema = proto.Field(
         proto.MESSAGE,
         number=3,
         message=table.TableSchema,
     )
-    row_errors = proto.RepeatedField(
+    row_errors: MutableSequence["RowError"] = proto.RepeatedField(
         proto.MESSAGE,
         number=4,
         message="RowError",
     )
-    write_stream = proto.Field(
+    write_stream: str = proto.Field(
         proto.STRING,
         number=5,
     )
@@ -560,11 +599,11 @@ class GetWriteStreamRequest(proto.Message):
             will be basic.
     """
 
-    name = proto.Field(
+    name: str = proto.Field(
         proto.STRING,
         number=1,
     )
-    view = proto.Field(
+    view: stream.WriteStreamView = proto.Field(
         proto.ENUM,
         number=3,
         enum=stream.WriteStreamView,
@@ -579,16 +618,16 @@ class BatchCommitWriteStreamsRequest(proto.Message):
             Required. Parent table that all the streams should belong
             to, in the form of
             ``projects/{project}/datasets/{dataset}/tables/{table}``.
-        write_streams (Sequence[str]):
+        write_streams (MutableSequence[str]):
             Required. The group of streams that will be
             committed atomically.
     """
 
-    parent = proto.Field(
+    parent: str = proto.Field(
         proto.STRING,
         number=1,
     )
-    write_streams = proto.RepeatedField(
+    write_streams: MutableSequence[str] = proto.RepeatedField(
         proto.STRING,
         number=2,
     )
@@ -603,7 +642,7 @@ class BatchCommitWriteStreamsResponse(proto.Message):
             granularity. This field will only exist when
            there are no stream errors. **Note** if this
            field is not set, it means the commit was not successful.
-        stream_errors (Sequence[google.cloud.bigquery_storage_v1.types.StorageError]):
+        stream_errors (MutableSequence[google.cloud.bigquery_storage_v1.types.StorageError]):
            Stream level error if commit failed. Only
            streams with error will be in the list. If
            empty, there is no error and all streams are
@@ -612,12 +651,12 @@
            due to atomicity guarantee.
     """
 
-    commit_time = proto.Field(
+    commit_time: timestamp_pb2.Timestamp = proto.Field(
         proto.MESSAGE,
         number=1,
         message=timestamp_pb2.Timestamp,
     )
-    stream_errors = proto.RepeatedField(
+    stream_errors: MutableSequence["StorageError"] = proto.RepeatedField(
         proto.MESSAGE,
         number=2,
         message="StorageError",
@@ -633,7 +672,7 @@ class FinalizeWriteStreamRequest(proto.Message):
            ``projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}``.
     """
 
-    name = proto.Field(
+    name: str = proto.Field(
         proto.STRING,
         number=1,
     )
@@ -647,7 +686,7 @@ class FinalizeWriteStreamResponse(proto.Message):
            Number of rows in the finalized stream.
     """
 
-    row_count = proto.Field(
+    row_count: int = proto.Field(
         proto.INT64,
         number=1,
     )
@@ -666,11 +705,11 @@ class FlushRowsRequest(proto.Message):
            be flushed.
""" - write_stream = proto.Field( + write_stream: str = proto.Field( proto.STRING, number=1, ) - offset = proto.Field( + offset: wrappers_pb2.Int64Value = proto.Field( proto.MESSAGE, number=2, message=wrappers_pb2.Int64Value, @@ -686,7 +725,7 @@ class FlushRowsResponse(proto.Message): offset) are flushed. """ - offset = proto.Field( + offset: int = proto.Field( proto.INT64, number=1, ) @@ -721,16 +760,16 @@ class StorageErrorCode(proto.Enum): OFFSET_ALREADY_EXISTS = 8 OFFSET_OUT_OF_RANGE = 9 - code = proto.Field( + code: StorageErrorCode = proto.Field( proto.ENUM, number=1, enum=StorageErrorCode, ) - entity = proto.Field( + entity: str = proto.Field( proto.STRING, number=2, ) - error_message = proto.Field( + error_message: str = proto.Field( proto.STRING, number=3, ) @@ -754,16 +793,16 @@ class RowErrorCode(proto.Enum): ROW_ERROR_CODE_UNSPECIFIED = 0 FIELDS_ERROR = 1 - index = proto.Field( + index: int = proto.Field( proto.INT64, number=1, ) - code = proto.Field( + code: RowErrorCode = proto.Field( proto.ENUM, number=2, enum=RowErrorCode, ) - message = proto.Field( + message: str = proto.Field( proto.STRING, number=3, ) diff --git a/google/cloud/bigquery_storage_v1/types/stream.py b/google/cloud/bigquery_storage_v1/types/stream.py index 18bfcd3e..a9eaa157 100644 --- a/google/cloud/bigquery_storage_v1/types/stream.py +++ b/google/cloud/bigquery_storage_v1/types/stream.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from typing import MutableMapping, MutableSequence + import proto # type: ignore from google.cloud.bigquery_storage_v1.types import arrow @@ -89,7 +91,7 @@ class ReadSession(proto.Message): read_options (google.cloud.bigquery_storage_v1.types.ReadSession.TableReadOptions): Optional. Read options for this session (e.g. column selection, filters). - streams (Sequence[google.cloud.bigquery_storage_v1.types.ReadStream]): + streams (MutableSequence[google.cloud.bigquery_storage_v1.types.ReadStream]): Output only. A list of streams created with the session. At least one stream is created with the session. In the @@ -123,7 +125,7 @@ class TableModifiers(proto.Message): interpreted as now. """ - snapshot_time = proto.Field( + snapshot_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp, @@ -140,7 +142,7 @@ class TableReadOptions(proto.Message): .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: - selected_fields (Sequence[str]): + selected_fields (MutableSequence[str]): Optional. The names of the fields in the table to be returned. If no field names are specified, then all fields in the table are returned. @@ -196,77 +198,77 @@ class TableReadOptions(proto.Message): This field is a member of `oneof`_ ``output_format_serialization_options``. 
""" - selected_fields = proto.RepeatedField( + selected_fields: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=1, ) - row_restriction = proto.Field( + row_restriction: str = proto.Field( proto.STRING, number=2, ) - arrow_serialization_options = proto.Field( + arrow_serialization_options: arrow.ArrowSerializationOptions = proto.Field( proto.MESSAGE, number=3, oneof="output_format_serialization_options", message=arrow.ArrowSerializationOptions, ) - avro_serialization_options = proto.Field( + avro_serialization_options: avro.AvroSerializationOptions = proto.Field( proto.MESSAGE, number=4, oneof="output_format_serialization_options", message=avro.AvroSerializationOptions, ) - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - expire_time = proto.Field( + expire_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, ) - data_format = proto.Field( + data_format: "DataFormat" = proto.Field( proto.ENUM, number=3, enum="DataFormat", ) - avro_schema = proto.Field( + avro_schema: avro.AvroSchema = proto.Field( proto.MESSAGE, number=4, oneof="schema", message=avro.AvroSchema, ) - arrow_schema = proto.Field( + arrow_schema: arrow.ArrowSchema = proto.Field( proto.MESSAGE, number=5, oneof="schema", message=arrow.ArrowSchema, ) - table = proto.Field( + table: str = proto.Field( proto.STRING, number=6, ) - table_modifiers = proto.Field( + table_modifiers: TableModifiers = proto.Field( proto.MESSAGE, number=7, message=TableModifiers, ) - read_options = proto.Field( + read_options: TableReadOptions = proto.Field( proto.MESSAGE, number=8, message=TableReadOptions, ) - streams = proto.RepeatedField( + streams: MutableSequence["ReadStream"] = proto.RepeatedField( proto.MESSAGE, number=10, message="ReadStream", ) - estimated_total_bytes_scanned = proto.Field( + estimated_total_bytes_scanned: int = proto.Field( proto.INT64, number=12, ) - trace_id = proto.Field( + trace_id: str = proto.Field( proto.STRING, number=13, ) @@ -283,7 +285,7 @@ class ReadStream(proto.Message): ``projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}``. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) @@ -334,36 +336,36 @@ class WriteMode(proto.Enum): WRITE_MODE_UNSPECIFIED = 0 INSERT = 1 - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - type_ = proto.Field( + type_: Type = proto.Field( proto.ENUM, number=2, enum=Type, ) - create_time = proto.Field( + create_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp, ) - commit_time = proto.Field( + commit_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp, ) - table_schema = proto.Field( + table_schema: gcbs_table.TableSchema = proto.Field( proto.MESSAGE, number=5, message=gcbs_table.TableSchema, ) - write_mode = proto.Field( + write_mode: WriteMode = proto.Field( proto.ENUM, number=7, enum=WriteMode, ) - location = proto.Field( + location: str = proto.Field( proto.STRING, number=8, ) diff --git a/google/cloud/bigquery_storage_v1/types/table.py b/google/cloud/bigquery_storage_v1/types/table.py index 006ac1c5..5b1f517c 100644 --- a/google/cloud/bigquery_storage_v1/types/table.py +++ b/google/cloud/bigquery_storage_v1/types/table.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
 #
+from typing import MutableMapping, MutableSequence
+
 import proto  # type: ignore
 
 
@@ -31,11 +33,11 @@ class TableSchema(proto.Message):
     necessary to generate valid message to write to BigQuery.
 
     Attributes:
-        fields (Sequence[google.cloud.bigquery_storage_v1.types.TableFieldSchema]):
+        fields (MutableSequence[google.cloud.bigquery_storage_v1.types.TableFieldSchema]):
             Describes the fields in a table.
     """
 
-    fields = proto.RepeatedField(
+    fields: MutableSequence["TableFieldSchema"] = proto.RepeatedField(
         proto.MESSAGE,
         number=1,
         message="TableFieldSchema",
@@ -57,7 +59,7 @@ class TableFieldSchema(proto.Message):
         mode (google.cloud.bigquery_storage_v1.types.TableFieldSchema.Mode):
             Optional. The field mode. The default value
             is NULLABLE.
-        fields (Sequence[google.cloud.bigquery_storage_v1.types.TableFieldSchema]):
+        fields (MutableSequence[google.cloud.bigquery_storage_v1.types.TableFieldSchema]):
             Optional. Describes the nested schema fields
             if the type property is set to STRUCT.
         description (str):
@@ -146,38 +148,38 @@ class Mode(proto.Enum):
         REQUIRED = 2
         REPEATED = 3
 
-    name = proto.Field(
+    name: str = proto.Field(
         proto.STRING,
         number=1,
     )
-    type_ = proto.Field(
+    type_: Type = proto.Field(
         proto.ENUM,
         number=2,
         enum=Type,
     )
-    mode = proto.Field(
+    mode: Mode = proto.Field(
         proto.ENUM,
         number=3,
         enum=Mode,
     )
-    fields = proto.RepeatedField(
+    fields: MutableSequence["TableFieldSchema"] = proto.RepeatedField(
         proto.MESSAGE,
         number=4,
         message="TableFieldSchema",
     )
-    description = proto.Field(
+    description: str = proto.Field(
         proto.STRING,
         number=6,
     )
-    max_length = proto.Field(
+    max_length: int = proto.Field(
         proto.INT64,
         number=7,
     )
-    precision = proto.Field(
+    precision: int = proto.Field(
         proto.INT64,
         number=8,
     )
-    scale = proto.Field(
+    scale: int = proto.Field(
         proto.INT64,
         number=9,
     )
diff --git a/google/cloud/bigquery_storage_v1beta2/services/big_query_read/async_client.py b/google/cloud/bigquery_storage_v1beta2/services/big_query_read/async_client.py
index 3b17b9a1..dc8e9086 100644
--- a/google/cloud/bigquery_storage_v1beta2/services/big_query_read/async_client.py
+++ b/google/cloud/bigquery_storage_v1beta2/services/big_query_read/async_client.py
@@ -19,6 +19,8 @@
 from typing import (
     Dict,
     Mapping,
+    MutableMapping,
+    MutableSequence,
     Optional,
     AsyncIterable,
     Awaitable,
@@ -175,9 +177,9 @@ def transport(self) -> BigQueryReadTransport:
     def __init__(
         self,
         *,
-        credentials: ga_credentials.Credentials = None,
+        credentials: Optional[ga_credentials.Credentials] = None,
         transport: Union[str, BigQueryReadTransport] = "grpc_asyncio",
-        client_options: ClientOptions = None,
+        client_options: Optional[ClientOptions] = None,
         client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
     ) -> None:
         """Instantiates the big query read client.
@@ -221,13 +223,13 @@ def __init__(
 
     async def create_read_session(
         self,
-        request: Union[storage.CreateReadSessionRequest, dict] = None,
+        request: Optional[Union[storage.CreateReadSessionRequest, dict]] = None,
         *,
-        parent: str = None,
-        read_session: stream.ReadSession = None,
-        max_stream_count: int = None,
+        parent: Optional[str] = None,
+        read_session: Optional[stream.ReadSession] = None,
+        max_stream_count: Optional[int] = None,
         retry: OptionalRetry = gapic_v1.method.DEFAULT,
-        timeout: float = None,
+        timeout: Optional[float] = None,
         metadata: Sequence[Tuple[str, str]] = (),
     ) -> stream.ReadSession:
         r"""Creates a new read session. A read session divides
@@ -281,7 +283,7 @@ async def sample_create_read_session():
                 print(response)
 
         Args:
-            request (Union[google.cloud.bigquery_storage_v1beta2.types.CreateReadSessionRequest, dict]):
+            request (Optional[Union[google.cloud.bigquery_storage_v1beta2.types.CreateReadSessionRequest, dict]]):
                 The request object. Request message for
                 `CreateReadSession`.
             parent (:class:`str`):
@@ -385,12 +387,12 @@ async def sample_create_read_session():
 
     def read_rows(
         self,
-        request: Union[storage.ReadRowsRequest, dict] = None,
+        request: Optional[Union[storage.ReadRowsRequest, dict]] = None,
         *,
-        read_stream: str = None,
-        offset: int = None,
+        read_stream: Optional[str] = None,
+        offset: Optional[int] = None,
         retry: OptionalRetry = gapic_v1.method.DEFAULT,
-        timeout: float = None,
+        timeout: Optional[float] = None,
         metadata: Sequence[Tuple[str, str]] = (),
     ) -> Awaitable[AsyncIterable[storage.ReadRowsResponse]]:
         r"""Reads rows from the stream in the format prescribed
@@ -430,7 +432,7 @@ async def sample_read_rows():
                 print(response)
 
         Args:
-            request (Union[google.cloud.bigquery_storage_v1beta2.types.ReadRowsRequest, dict]):
+            request (Optional[Union[google.cloud.bigquery_storage_v1beta2.types.ReadRowsRequest, dict]]):
                 The request object. Request message for `ReadRows`.
             read_stream (:class:`str`):
                 Required. Stream to read rows from.
@@ -516,10 +518,10 @@ async def sample_read_rows():
 
     async def split_read_stream(
         self,
-        request: Union[storage.SplitReadStreamRequest, dict] = None,
+        request: Optional[Union[storage.SplitReadStreamRequest, dict]] = None,
         *,
         retry: OptionalRetry = gapic_v1.method.DEFAULT,
-        timeout: float = None,
+        timeout: Optional[float] = None,
         metadata: Sequence[Tuple[str, str]] = (),
     ) -> storage.SplitReadStreamResponse:
         r"""Splits a given ``ReadStream`` into two ``ReadStream`` objects.
@@ -563,7 +565,7 @@ async def sample_split_read_stream():
                 print(response)
 
         Args:
-            request (Union[google.cloud.bigquery_storage_v1beta2.types.SplitReadStreamRequest, dict]):
+            request (Optional[Union[google.cloud.bigquery_storage_v1beta2.types.SplitReadStreamRequest, dict]]):
                 The request object. Request message for
                 `SplitReadStream`.
             retry (google.api_core.retry.Retry): Designation of what errors, if any,
diff --git a/google/cloud/bigquery_storage_v1beta2/services/big_query_read/client.py b/google/cloud/bigquery_storage_v1beta2/services/big_query_read/client.py
index 6efd02ac..581ff73b 100644
--- a/google/cloud/bigquery_storage_v1beta2/services/big_query_read/client.py
+++ b/google/cloud/bigquery_storage_v1beta2/services/big_query_read/client.py
@@ -16,7 +16,19 @@
 from collections import OrderedDict
 import os
 import re
-from typing import Dict, Mapping, Optional, Iterable, Sequence, Tuple, Type, Union
+from typing import (
+    Dict,
+    Mapping,
+    MutableMapping,
+    MutableSequence,
+    Optional,
+    Iterable,
+    Sequence,
+    Tuple,
+    Type,
+    Union,
+    cast,
+)
 import pkg_resources
 
 from google.api_core import client_options as client_options_lib
@@ -58,7 +70,7 @@ class BigQueryReadClientMeta(type):
 
     def get_transport_class(
         cls,
-        label: str = None,
+        label: Optional[str] = None,
     ) -> Type[BigQueryReadTransport]:
         """Returns an appropriate transport class.
@@ -383,8 +395,8 @@ def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, BigQueryReadTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, + transport: Optional[Union[str, BigQueryReadTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the big query read client. @@ -398,7 +410,7 @@ def __init__( transport (Union[str, BigQueryReadTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT @@ -428,6 +440,7 @@ def __init__( client_options = client_options_lib.from_dict(client_options) if client_options is None: client_options = client_options_lib.ClientOptions() + client_options = cast(client_options_lib.ClientOptions, client_options) api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( client_options @@ -480,13 +493,13 @@ def __init__( def create_read_session( self, - request: Union[storage.CreateReadSessionRequest, dict] = None, + request: Optional[Union[storage.CreateReadSessionRequest, dict]] = None, *, - parent: str = None, - read_session: stream.ReadSession = None, - max_stream_count: int = None, + parent: Optional[str] = None, + read_session: Optional[stream.ReadSession] = None, + max_stream_count: Optional[int] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> stream.ReadSession: r"""Creates a new read session. A read session divides @@ -634,12 +647,12 @@ def sample_create_read_session(): def read_rows( self, - request: Union[storage.ReadRowsRequest, dict] = None, + request: Optional[Union[storage.ReadRowsRequest, dict]] = None, *, - read_stream: str = None, - offset: int = None, + read_stream: Optional[str] = None, + offset: Optional[int] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> Iterable[storage.ReadRowsResponse]: r"""Reads rows from the stream in the format prescribed @@ -756,10 +769,10 @@ def sample_read_rows(): def split_read_stream( self, - request: Union[storage.SplitReadStreamRequest, dict] = None, + request: Optional[Union[storage.SplitReadStreamRequest, dict]] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> storage.SplitReadStreamResponse: r"""Splits a given ``ReadStream`` into two ``ReadStream`` objects. 
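The new ``cast`` call in this constructor is worth a note: ``client_options`` may now arrive as a plain ``dict``, and after the ``from_dict``/default handling it is always a ``ClientOptions``, but mypy cannot infer that across the two branches. A standalone sketch of the same narrowing pattern (``normalize_options`` is a hypothetical helper, not part of the library)::

    from typing import Optional, Union, cast

    from google.api_core import client_options as client_options_lib

    def normalize_options(
        client_options: Optional[
            Union[client_options_lib.ClientOptions, dict]
        ] = None,
    ) -> client_options_lib.ClientOptions:
        # Mirrors the constructor logic above: dicts are converted, None
        # gets defaults, and the cast tells the type checker the union is
        # fully resolved by this point.
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        return cast(client_options_lib.ClientOptions, client_options)
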
diff --git a/google/cloud/bigquery_storage_v1beta2/services/big_query_read/transports/base.py b/google/cloud/bigquery_storage_v1beta2/services/big_query_read/transports/base.py index 6433df14..647c4c40 100644 --- a/google/cloud/bigquery_storage_v1beta2/services/big_query_read/transports/base.py +++ b/google/cloud/bigquery_storage_v1beta2/services/big_query_read/transports/base.py @@ -52,7 +52,7 @@ def __init__( self, *, host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, diff --git a/google/cloud/bigquery_storage_v1beta2/services/big_query_read/transports/grpc.py b/google/cloud/bigquery_storage_v1beta2/services/big_query_read/transports/grpc.py index 794d4137..021399b5 100644 --- a/google/cloud/bigquery_storage_v1beta2/services/big_query_read/transports/grpc.py +++ b/google/cloud/bigquery_storage_v1beta2/services/big_query_read/transports/grpc.py @@ -51,14 +51,14 @@ def __init__( self, *, host: str = "bigquerystorage.googleapis.com", - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, @@ -185,8 +185,8 @@ def __init__( def create_channel( cls, host: str = "bigquerystorage.googleapis.com", - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, **kwargs, diff --git a/google/cloud/bigquery_storage_v1beta2/services/big_query_read/transports/grpc_asyncio.py b/google/cloud/bigquery_storage_v1beta2/services/big_query_read/transports/grpc_asyncio.py index 2a42b07d..b63d2c32 100644 --- a/google/cloud/bigquery_storage_v1beta2/services/big_query_read/transports/grpc_asyncio.py +++ b/google/cloud/bigquery_storage_v1beta2/services/big_query_read/transports/grpc_asyncio.py @@ -53,7 +53,7 @@ class BigQueryReadGrpcAsyncIOTransport(BigQueryReadTransport): def create_channel( cls, host: str = "bigquerystorage.googleapis.com", - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, @@ -96,15 +96,15 @@ def __init__( self, *, host: str = "bigquerystorage.googleapis.com", - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, 
scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, api_audience: Optional[str] = None, diff --git a/google/cloud/bigquery_storage_v1beta2/services/big_query_write/async_client.py b/google/cloud/bigquery_storage_v1beta2/services/big_query_write/async_client.py index 79cfad22..f13e8fc6 100644 --- a/google/cloud/bigquery_storage_v1beta2/services/big_query_write/async_client.py +++ b/google/cloud/bigquery_storage_v1beta2/services/big_query_write/async_client.py @@ -19,6 +19,8 @@ from typing import ( Dict, Mapping, + MutableMapping, + MutableSequence, Optional, AsyncIterable, Awaitable, @@ -176,9 +178,9 @@ def transport(self) -> BigQueryWriteTransport: def __init__( self, *, - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, transport: Union[str, BigQueryWriteTransport] = "grpc_asyncio", - client_options: ClientOptions = None, + client_options: Optional[ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the big query write client. @@ -222,12 +224,12 @@ def __init__( async def create_write_stream( self, - request: Union[storage.CreateWriteStreamRequest, dict] = None, + request: Optional[Union[storage.CreateWriteStreamRequest, dict]] = None, *, - parent: str = None, - write_stream: stream.WriteStream = None, + parent: Optional[str] = None, + write_stream: Optional[stream.WriteStream] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> stream.WriteStream: r"""Creates a write stream to the given table. Additionally, every @@ -265,7 +267,7 @@ async def sample_create_write_stream(): print(response) Args: - request (Union[google.cloud.bigquery_storage_v1beta2.types.CreateWriteStreamRequest, dict]): + request (Optional[Union[google.cloud.bigquery_storage_v1beta2.types.CreateWriteStreamRequest, dict]]): The request object. Request message for `CreateWriteStream`. parent (:class:`str`): @@ -351,10 +353,10 @@ async def sample_create_write_stream(): def append_rows( self, - requests: AsyncIterator[storage.AppendRowsRequest] = None, + requests: Optional[AsyncIterator[storage.AppendRowsRequest]] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> Awaitable[AsyncIterable[storage.AppendRowsResponse]]: r"""Appends data to the given stream. 
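``append_rows`` is a bidirectional stream, so ``requests`` is an async iterator rather than a single request object; the change above only makes its Optional-ness explicit in the signature. A minimal sketch in the spirit of the generated samples (the stream name is a placeholder, and a real request would also carry ``proto_rows`` built from a ``ProtoSchema``)::

    from google.cloud.bigquery_storage_v1beta2.services.big_query_write import (
        BigQueryWriteAsyncClient,
    )
    from google.cloud.bigquery_storage_v1beta2 import types

    async def append_batches() -> None:
        client = BigQueryWriteAsyncClient()

        async def request_generator():
            # The first request on the stream must name the target write
            # stream; the payload is omitted here as a placeholder.
            yield types.AppendRowsRequest(
                write_stream="projects/p/datasets/d/tables/t/streams/_default",
            )

        stream = await client.append_rows(requests=request_generator())
        async for response in stream:
            print(response)
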
@@ -465,11 +467,11 @@ def request_generator(): async def get_write_stream( self, - request: Union[storage.GetWriteStreamRequest, dict] = None, + request: Optional[Union[storage.GetWriteStreamRequest, dict]] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> stream.WriteStream: r"""Gets a write stream. @@ -501,7 +503,7 @@ async def sample_get_write_stream(): print(response) Args: - request (Union[google.cloud.bigquery_storage_v1beta2.types.GetWriteStreamRequest, dict]): + request (Optional[Union[google.cloud.bigquery_storage_v1beta2.types.GetWriteStreamRequest, dict]]): The request object. Request message for `GetWriteStreamRequest`. name (:class:`str`): @@ -578,11 +580,11 @@ async def sample_get_write_stream(): async def finalize_write_stream( self, - request: Union[storage.FinalizeWriteStreamRequest, dict] = None, + request: Optional[Union[storage.FinalizeWriteStreamRequest, dict]] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> storage.FinalizeWriteStreamResponse: r"""Finalize a write stream so that no new data can be appended to @@ -615,7 +617,7 @@ async def sample_finalize_write_stream(): print(response) Args: - request (Union[google.cloud.bigquery_storage_v1beta2.types.FinalizeWriteStreamRequest, dict]): + request (Optional[Union[google.cloud.bigquery_storage_v1beta2.types.FinalizeWriteStreamRequest, dict]]): The request object. Request message for invoking `FinalizeWriteStream`. name (:class:`str`): @@ -689,11 +691,11 @@ async def sample_finalize_write_stream(): async def batch_commit_write_streams( self, - request: Union[storage.BatchCommitWriteStreamsRequest, dict] = None, + request: Optional[Union[storage.BatchCommitWriteStreamsRequest, dict]] = None, *, - parent: str = None, + parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> storage.BatchCommitWriteStreamsResponse: r"""Atomically commits a group of ``PENDING`` streams that belong to @@ -730,7 +732,7 @@ async def sample_batch_commit_write_streams(): print(response) Args: - request (Union[google.cloud.bigquery_storage_v1beta2.types.BatchCommitWriteStreamsRequest, dict]): + request (Optional[Union[google.cloud.bigquery_storage_v1beta2.types.BatchCommitWriteStreamsRequest, dict]]): The request object. Request message for `BatchCommitWriteStreams`. parent (:class:`str`): @@ -805,11 +807,11 @@ async def sample_batch_commit_write_streams(): async def flush_rows( self, - request: Union[storage.FlushRowsRequest, dict] = None, + request: Optional[Union[storage.FlushRowsRequest, dict]] = None, *, - write_stream: str = None, + write_stream: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> storage.FlushRowsResponse: r"""Flushes rows to a BUFFERED stream. If users are appending rows @@ -846,7 +848,7 @@ async def sample_flush_rows(): print(response) Args: - request (Union[google.cloud.bigquery_storage_v1beta2.types.FlushRowsRequest, dict]): + request (Optional[Union[google.cloud.bigquery_storage_v1beta2.types.FlushRowsRequest, dict]]): The request object. 
Request message for `FlushRows`. write_stream (:class:`str`): Required. The stream that is the diff --git a/google/cloud/bigquery_storage_v1beta2/services/big_query_write/client.py b/google/cloud/bigquery_storage_v1beta2/services/big_query_write/client.py index 7e030970..fbeea077 100644 --- a/google/cloud/bigquery_storage_v1beta2/services/big_query_write/client.py +++ b/google/cloud/bigquery_storage_v1beta2/services/big_query_write/client.py @@ -19,6 +19,8 @@ from typing import ( Dict, Mapping, + MutableMapping, + MutableSequence, Optional, Iterable, Iterator, @@ -26,6 +28,7 @@ Tuple, Type, Union, + cast, ) import pkg_resources @@ -68,7 +71,7 @@ class BigQueryWriteClientMeta(type): def get_transport_class( cls, - label: str = None, + label: Optional[str] = None, ) -> Type[BigQueryWriteTransport]: """Returns an appropriate transport class. @@ -369,8 +372,8 @@ def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, BigQueryWriteTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, + transport: Optional[Union[str, BigQueryWriteTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the big query write client. @@ -384,7 +387,7 @@ def __init__( transport (Union[str, BigQueryWriteTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT @@ -414,6 +417,7 @@ def __init__( client_options = client_options_lib.from_dict(client_options) if client_options is None: client_options = client_options_lib.ClientOptions() + client_options = cast(client_options_lib.ClientOptions, client_options) api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( client_options @@ -466,12 +470,12 @@ def __init__( def create_write_stream( self, - request: Union[storage.CreateWriteStreamRequest, dict] = None, + request: Optional[Union[storage.CreateWriteStreamRequest, dict]] = None, *, - parent: str = None, - write_stream: stream.WriteStream = None, + parent: Optional[str] = None, + write_stream: Optional[stream.WriteStream] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> stream.WriteStream: r"""Creates a write stream to the given table. Additionally, every @@ -584,10 +588,10 @@ def sample_create_write_stream(): def append_rows( self, - requests: Iterator[storage.AppendRowsRequest] = None, + requests: Optional[Iterator[storage.AppendRowsRequest]] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> Iterable[storage.AppendRowsResponse]: r"""Appends data to the given stream. 
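The synchronous client takes the same stream of requests as a plain ``Iterator``; a generator function is the idiomatic way to supply it. The same placeholder-based sketch, sync-style::

    from google.cloud.bigquery_storage_v1beta2.services.big_query_write import (
        BigQueryWriteClient,
    )
    from google.cloud.bigquery_storage_v1beta2 import types

    def append_batches() -> None:
        client = BigQueryWriteClient()

        def request_generator():
            # Placeholder stream name, as in the async variant above.
            yield types.AppendRowsRequest(
                write_stream="projects/p/datasets/d/tables/t/streams/_default",
            )

        for response in client.append_rows(requests=request_generator()):
            print(response)
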
@@ -684,11 +688,11 @@ def request_generator(): def get_write_stream( self, - request: Union[storage.GetWriteStreamRequest, dict] = None, + request: Optional[Union[storage.GetWriteStreamRequest, dict]] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> stream.WriteStream: r"""Gets a write stream. @@ -787,11 +791,11 @@ def sample_get_write_stream(): def finalize_write_stream( self, - request: Union[storage.FinalizeWriteStreamRequest, dict] = None, + request: Optional[Union[storage.FinalizeWriteStreamRequest, dict]] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> storage.FinalizeWriteStreamResponse: r"""Finalize a write stream so that no new data can be appended to @@ -888,11 +892,11 @@ def sample_finalize_write_stream(): def batch_commit_write_streams( self, - request: Union[storage.BatchCommitWriteStreamsRequest, dict] = None, + request: Optional[Union[storage.BatchCommitWriteStreamsRequest, dict]] = None, *, - parent: str = None, + parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> storage.BatchCommitWriteStreamsResponse: r"""Atomically commits a group of ``PENDING`` streams that belong to @@ -996,11 +1000,11 @@ def sample_batch_commit_write_streams(): def flush_rows( self, - request: Union[storage.FlushRowsRequest, dict] = None, + request: Optional[Union[storage.FlushRowsRequest, dict]] = None, *, - write_stream: str = None, + write_stream: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> storage.FlushRowsResponse: r"""Flushes rows to a BUFFERED stream. 
If users are appending rows diff --git a/google/cloud/bigquery_storage_v1beta2/services/big_query_write/transports/base.py b/google/cloud/bigquery_storage_v1beta2/services/big_query_write/transports/base.py index 57067ceb..673d44a9 100644 --- a/google/cloud/bigquery_storage_v1beta2/services/big_query_write/transports/base.py +++ b/google/cloud/bigquery_storage_v1beta2/services/big_query_write/transports/base.py @@ -53,7 +53,7 @@ def __init__( self, *, host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, diff --git a/google/cloud/bigquery_storage_v1beta2/services/big_query_write/transports/grpc.py b/google/cloud/bigquery_storage_v1beta2/services/big_query_write/transports/grpc.py index eb65380c..4300e8eb 100644 --- a/google/cloud/bigquery_storage_v1beta2/services/big_query_write/transports/grpc.py +++ b/google/cloud/bigquery_storage_v1beta2/services/big_query_write/transports/grpc.py @@ -49,14 +49,14 @@ def __init__( self, *, host: str = "bigquerystorage.googleapis.com", - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, @@ -183,8 +183,8 @@ def __init__( def create_channel( cls, host: str = "bigquerystorage.googleapis.com", - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, **kwargs, diff --git a/google/cloud/bigquery_storage_v1beta2/services/big_query_write/transports/grpc_asyncio.py b/google/cloud/bigquery_storage_v1beta2/services/big_query_write/transports/grpc_asyncio.py index e8a4ed82..fc0cafee 100644 --- a/google/cloud/bigquery_storage_v1beta2/services/big_query_write/transports/grpc_asyncio.py +++ b/google/cloud/bigquery_storage_v1beta2/services/big_query_write/transports/grpc_asyncio.py @@ -51,7 +51,7 @@ class BigQueryWriteGrpcAsyncIOTransport(BigQueryWriteTransport): def create_channel( cls, host: str = "bigquerystorage.googleapis.com", - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, @@ -94,15 +94,15 @@ def __init__( self, *, host: str = "bigquerystorage.googleapis.com", - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, 
credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, api_audience: Optional[str] = None, diff --git a/google/cloud/bigquery_storage_v1beta2/types/arrow.py b/google/cloud/bigquery_storage_v1beta2/types/arrow.py index 3087b823..1e71aec1 100644 --- a/google/cloud/bigquery_storage_v1beta2/types/arrow.py +++ b/google/cloud/bigquery_storage_v1beta2/types/arrow.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from typing import MutableMapping, MutableSequence + import proto # type: ignore @@ -38,7 +40,7 @@ class ArrowSchema(proto.Message): IPC serialized Arrow schema. """ - serialized_schema = proto.Field( + serialized_schema: bytes = proto.Field( proto.BYTES, number=1, ) @@ -52,7 +54,7 @@ class ArrowRecordBatch(proto.Message): IPC-serialized Arrow RecordBatch. """ - serialized_record_batch = proto.Field( + serialized_record_batch: bytes = proto.Field( proto.BYTES, number=1, ) @@ -72,7 +74,7 @@ class Format(proto.Enum): ARROW_0_14 = 1 ARROW_0_15 = 2 - format_ = proto.Field( + format_: Format = proto.Field( proto.ENUM, number=1, enum=Format, diff --git a/google/cloud/bigquery_storage_v1beta2/types/avro.py b/google/cloud/bigquery_storage_v1beta2/types/avro.py index 4865800c..b4c977c1 100644 --- a/google/cloud/bigquery_storage_v1beta2/types/avro.py +++ b/google/cloud/bigquery_storage_v1beta2/types/avro.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from typing import MutableMapping, MutableSequence + import proto # type: ignore @@ -34,7 +36,7 @@ class AvroSchema(proto.Message): https://avro.apache.org/docs/1.8.1/spec.html. """ - schema = proto.Field( + schema: str = proto.Field( proto.STRING, number=1, ) @@ -48,7 +50,7 @@ class AvroRows(proto.Message): Binary serialized rows in a block. """ - serialized_binary_rows = proto.Field( + serialized_binary_rows: bytes = proto.Field( proto.BYTES, number=1, ) diff --git a/google/cloud/bigquery_storage_v1beta2/types/protobuf.py b/google/cloud/bigquery_storage_v1beta2/types/protobuf.py index 7f84063d..ec77a4f4 100644 --- a/google/cloud/bigquery_storage_v1beta2/types/protobuf.py +++ b/google/cloud/bigquery_storage_v1beta2/types/protobuf.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from typing import MutableMapping, MutableSequence + import proto # type: ignore from google.protobuf import descriptor_pb2 # type: ignore @@ -40,7 +42,7 @@ class ProtoSchema(proto.Message): (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf). 
""" - proto_descriptor = proto.Field( + proto_descriptor: descriptor_pb2.DescriptorProto = proto.Field( proto.MESSAGE, number=1, message=descriptor_pb2.DescriptorProto, @@ -51,7 +53,7 @@ class ProtoRows(proto.Message): r""" Attributes: - serialized_rows (Sequence[bytes]): + serialized_rows (MutableSequence[bytes]): A sequence of rows serialized as a Protocol Buffer. See @@ -60,7 +62,7 @@ class ProtoRows(proto.Message): field. """ - serialized_rows = proto.RepeatedField( + serialized_rows: MutableSequence[bytes] = proto.RepeatedField( proto.BYTES, number=1, ) diff --git a/google/cloud/bigquery_storage_v1beta2/types/storage.py b/google/cloud/bigquery_storage_v1beta2/types/storage.py index 1b4fd883..bfd37908 100644 --- a/google/cloud/bigquery_storage_v1beta2/types/storage.py +++ b/google/cloud/bigquery_storage_v1beta2/types/storage.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from typing import MutableMapping, MutableSequence + import proto # type: ignore from google.cloud.bigquery_storage_v1beta2.types import arrow @@ -73,16 +75,16 @@ class CreateReadSessionRequest(proto.Message): Streams must be read starting from offset 0. """ - parent = proto.Field( + parent: str = proto.Field( proto.STRING, number=1, ) - read_session = proto.Field( + read_session: stream.ReadSession = proto.Field( proto.MESSAGE, number=2, message=stream.ReadSession, ) - max_stream_count = proto.Field( + max_stream_count: int = proto.Field( proto.INT32, number=3, ) @@ -101,11 +103,11 @@ class ReadRowsRequest(proto.Message): reading from offset zero. """ - read_stream = proto.Field( + read_stream: str = proto.Field( proto.STRING, number=1, ) - offset = proto.Field( + offset: int = proto.Field( proto.INT64, number=2, ) @@ -121,7 +123,7 @@ class ThrottleState(proto.Message): throttled. """ - throttle_percent = proto.Field( + throttle_percent: int = proto.Field( proto.INT32, number=1, ) @@ -158,16 +160,16 @@ class Progress(proto.Message): includes the rows in the current response. """ - at_response_start = proto.Field( + at_response_start: float = proto.Field( proto.DOUBLE, number=1, ) - at_response_end = proto.Field( + at_response_end: float = proto.Field( proto.DOUBLE, number=2, ) - progress = proto.Field( + progress: Progress = proto.Field( proto.MESSAGE, number=2, message=Progress, @@ -213,39 +215,39 @@ class ReadRowsResponse(proto.Message): This field is a member of `oneof`_ ``schema``. """ - avro_rows = proto.Field( + avro_rows: avro.AvroRows = proto.Field( proto.MESSAGE, number=3, oneof="rows", message=avro.AvroRows, ) - arrow_record_batch = proto.Field( + arrow_record_batch: arrow.ArrowRecordBatch = proto.Field( proto.MESSAGE, number=4, oneof="rows", message=arrow.ArrowRecordBatch, ) - row_count = proto.Field( + row_count: int = proto.Field( proto.INT64, number=6, ) - stats = proto.Field( + stats: "StreamStats" = proto.Field( proto.MESSAGE, number=2, message="StreamStats", ) - throttle_state = proto.Field( + throttle_state: "ThrottleState" = proto.Field( proto.MESSAGE, number=5, message="ThrottleState", ) - avro_schema = proto.Field( + avro_schema: avro.AvroSchema = proto.Field( proto.MESSAGE, number=7, oneof="schema", message=avro.AvroSchema, ) - arrow_schema = proto.Field( + arrow_schema: arrow.ArrowSchema = proto.Field( proto.MESSAGE, number=8, oneof="schema", @@ -274,11 +276,11 @@ class SplitReadStreamRequest(proto.Message): side. 
""" - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - fraction = proto.Field( + fraction: float = proto.Field( proto.DOUBLE, number=2, ) @@ -298,12 +300,12 @@ class SplitReadStreamResponse(proto.Message): original stream can no longer be split. """ - primary_stream = proto.Field( + primary_stream: stream.ReadStream = proto.Field( proto.MESSAGE, number=1, message=stream.ReadStream, ) - remainder_stream = proto.Field( + remainder_stream: stream.ReadStream = proto.Field( proto.MESSAGE, number=2, message=stream.ReadStream, @@ -322,11 +324,11 @@ class CreateWriteStreamRequest(proto.Message): Required. Stream to be created. """ - parent = proto.Field( + parent: str = proto.Field( proto.STRING, number=1, ) - write_stream = proto.Field( + write_stream: stream.WriteStream = proto.Field( proto.MESSAGE, number=2, message=stream.WriteStream, @@ -373,33 +375,33 @@ class ProtoData(proto.Message): format. """ - writer_schema = proto.Field( + writer_schema: protobuf.ProtoSchema = proto.Field( proto.MESSAGE, number=1, message=protobuf.ProtoSchema, ) - rows = proto.Field( + rows: protobuf.ProtoRows = proto.Field( proto.MESSAGE, number=2, message=protobuf.ProtoRows, ) - write_stream = proto.Field( + write_stream: str = proto.Field( proto.STRING, number=1, ) - offset = proto.Field( + offset: wrappers_pb2.Int64Value = proto.Field( proto.MESSAGE, number=2, message=wrappers_pb2.Int64Value, ) - proto_rows = proto.Field( + proto_rows: ProtoData = proto.Field( proto.MESSAGE, number=4, oneof="rows", message=ProtoData, ) - trace_id = proto.Field( + trace_id: str = proto.Field( proto.STRING, number=6, ) @@ -463,25 +465,25 @@ class AppendResult(proto.Message): appending using default streams. """ - offset = proto.Field( + offset: wrappers_pb2.Int64Value = proto.Field( proto.MESSAGE, number=1, message=wrappers_pb2.Int64Value, ) - append_result = proto.Field( + append_result: AppendResult = proto.Field( proto.MESSAGE, number=1, oneof="response", message=AppendResult, ) - error = proto.Field( + error: status_pb2.Status = proto.Field( proto.MESSAGE, number=2, oneof="response", message=status_pb2.Status, ) - updated_schema = proto.Field( + updated_schema: table.TableSchema = proto.Field( proto.MESSAGE, number=3, message=table.TableSchema, @@ -497,7 +499,7 @@ class GetWriteStreamRequest(proto.Message): ``projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}``. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) @@ -511,16 +513,16 @@ class BatchCommitWriteStreamsRequest(proto.Message): Required. Parent table that all the streams should belong to, in the form of ``projects/{project}/datasets/{dataset}/tables/{table}``. - write_streams (Sequence[str]): + write_streams (MutableSequence[str]): Required. The group of streams that will be committed atomically. """ - parent = proto.Field( + parent: str = proto.Field( proto.STRING, number=1, ) - write_streams = proto.RepeatedField( + write_streams: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=2, ) @@ -535,7 +537,7 @@ class BatchCommitWriteStreamsResponse(proto.Message): granularity. This field will only exist when there are no stream errors. **Note** if this field is not set, it means the commit was not successful. - stream_errors (Sequence[google.cloud.bigquery_storage_v1beta2.types.StorageError]): + stream_errors (MutableSequence[google.cloud.bigquery_storage_v1beta2.types.StorageError]): Stream level error if commit failed. Only streams with error will be in the list. 
If empty, there is no error and all streams are @@ -544,12 +546,12 @@ class BatchCommitWriteStreamsResponse(proto.Message): due to atomicity guarantee. """ - commit_time = proto.Field( + commit_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp, ) - stream_errors = proto.RepeatedField( + stream_errors: MutableSequence["StorageError"] = proto.RepeatedField( proto.MESSAGE, number=2, message="StorageError", @@ -565,7 +567,7 @@ class FinalizeWriteStreamRequest(proto.Message): ``projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}``. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) @@ -579,7 +581,7 @@ class FinalizeWriteStreamResponse(proto.Message): Number of rows in the finalized stream. """ - row_count = proto.Field( + row_count: int = proto.Field( proto.INT64, number=1, ) @@ -598,11 +600,11 @@ class FlushRowsRequest(proto.Message): be flushed. """ - write_stream = proto.Field( + write_stream: str = proto.Field( proto.STRING, number=1, ) - offset = proto.Field( + offset: wrappers_pb2.Int64Value = proto.Field( proto.MESSAGE, number=2, message=wrappers_pb2.Int64Value, @@ -618,7 +620,7 @@ class FlushRowsResponse(proto.Message): offset) are flushed. """ - offset = proto.Field( + offset: int = proto.Field( proto.INT64, number=1, ) @@ -650,16 +652,16 @@ class StorageErrorCode(proto.Enum): INVALID_STREAM_STATE = 5 STREAM_FINALIZED = 6 - code = proto.Field( + code: StorageErrorCode = proto.Field( proto.ENUM, number=1, enum=StorageErrorCode, ) - entity = proto.Field( + entity: str = proto.Field( proto.STRING, number=2, ) - error_message = proto.Field( + error_message: str = proto.Field( proto.STRING, number=3, ) diff --git a/google/cloud/bigquery_storage_v1beta2/types/stream.py b/google/cloud/bigquery_storage_v1beta2/types/stream.py index e1292d2b..7edadaa6 100644 --- a/google/cloud/bigquery_storage_v1beta2/types/stream.py +++ b/google/cloud/bigquery_storage_v1beta2/types/stream.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from typing import MutableMapping, MutableSequence + import proto # type: ignore from google.cloud.bigquery_storage_v1beta2.types import arrow @@ -78,7 +80,7 @@ class ReadSession(proto.Message): read_options (google.cloud.bigquery_storage_v1beta2.types.ReadSession.TableReadOptions): Optional. Read options for this session (e.g. column selection, filters). - streams (Sequence[google.cloud.bigquery_storage_v1beta2.types.ReadStream]): + streams (MutableSequence[google.cloud.bigquery_storage_v1beta2.types.ReadStream]): Output only. A list of streams created with the session. At least one stream is created with the session. In the @@ -97,7 +99,7 @@ class TableModifiers(proto.Message): interpreted as now. """ - snapshot_time = proto.Field( + snapshot_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp, @@ -107,7 +109,7 @@ class TableReadOptions(proto.Message): r"""Options dictating how we read a table. Attributes: - selected_fields (Sequence[str]): + selected_fields (MutableSequence[str]): Names of the fields in the table that should be read. If empty, all fields will be read. If the specified field is a nested field, all the sub-fields in the field will be @@ -128,61 +130,61 @@ class TableReadOptions(proto.Message): Arrow output format. 
""" - selected_fields = proto.RepeatedField( + selected_fields: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=1, ) - row_restriction = proto.Field( + row_restriction: str = proto.Field( proto.STRING, number=2, ) - arrow_serialization_options = proto.Field( + arrow_serialization_options: arrow.ArrowSerializationOptions = proto.Field( proto.MESSAGE, number=3, message=arrow.ArrowSerializationOptions, ) - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - expire_time = proto.Field( + expire_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp, ) - data_format = proto.Field( + data_format: "DataFormat" = proto.Field( proto.ENUM, number=3, enum="DataFormat", ) - avro_schema = proto.Field( + avro_schema: avro.AvroSchema = proto.Field( proto.MESSAGE, number=4, oneof="schema", message=avro.AvroSchema, ) - arrow_schema = proto.Field( + arrow_schema: arrow.ArrowSchema = proto.Field( proto.MESSAGE, number=5, oneof="schema", message=arrow.ArrowSchema, ) - table = proto.Field( + table: str = proto.Field( proto.STRING, number=6, ) - table_modifiers = proto.Field( + table_modifiers: TableModifiers = proto.Field( proto.MESSAGE, number=7, message=TableModifiers, ) - read_options = proto.Field( + read_options: TableReadOptions = proto.Field( proto.MESSAGE, number=8, message=TableReadOptions, ) - streams = proto.RepeatedField( + streams: MutableSequence["ReadStream"] = proto.RepeatedField( proto.MESSAGE, number=10, message="ReadStream", @@ -200,7 +202,7 @@ class ReadStream(proto.Message): ``projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}``. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) @@ -239,26 +241,26 @@ class Type(proto.Enum): PENDING = 2 BUFFERED = 3 - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - type_ = proto.Field( + type_: Type = proto.Field( proto.ENUM, number=2, enum=Type, ) - create_time = proto.Field( + create_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp, ) - commit_time = proto.Field( + commit_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp, ) - table_schema = proto.Field( + table_schema: gcbs_table.TableSchema = proto.Field( proto.MESSAGE, number=5, message=gcbs_table.TableSchema, diff --git a/google/cloud/bigquery_storage_v1beta2/types/table.py b/google/cloud/bigquery_storage_v1beta2/types/table.py index 0cc26c05..6e54c96a 100644 --- a/google/cloud/bigquery_storage_v1beta2/types/table.py +++ b/google/cloud/bigquery_storage_v1beta2/types/table.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from typing import MutableMapping, MutableSequence + import proto # type: ignore @@ -29,11 +31,11 @@ class TableSchema(proto.Message): r"""Schema of a table Attributes: - fields (Sequence[google.cloud.bigquery_storage_v1beta2.types.TableFieldSchema]): + fields (MutableSequence[google.cloud.bigquery_storage_v1beta2.types.TableFieldSchema]): Describes the fields in a table. """ - fields = proto.RepeatedField( + fields: MutableSequence["TableFieldSchema"] = proto.RepeatedField( proto.MESSAGE, number=1, message="TableFieldSchema", @@ -54,7 +56,7 @@ class TableFieldSchema(proto.Message): mode (google.cloud.bigquery_storage_v1beta2.types.TableFieldSchema.Mode): Optional. The field mode. The default value is NULLABLE. 
- fields (Sequence[google.cloud.bigquery_storage_v1beta2.types.TableFieldSchema]): + fields (MutableSequence[google.cloud.bigquery_storage_v1beta2.types.TableFieldSchema]): Optional. Describes the nested schema fields if the type property is set to STRUCT. description (str): @@ -88,26 +90,26 @@ class Mode(proto.Enum): REQUIRED = 2 REPEATED = 3 - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - type_ = proto.Field( + type_: Type = proto.Field( proto.ENUM, number=2, enum=Type, ) - mode = proto.Field( + mode: Mode = proto.Field( proto.ENUM, number=3, enum=Mode, ) - fields = proto.RepeatedField( + fields: MutableSequence["TableFieldSchema"] = proto.RepeatedField( proto.MESSAGE, number=4, message="TableFieldSchema", ) - description = proto.Field( + description: str = proto.Field( proto.STRING, number=6, ) diff --git a/samples/generated_samples/snippet_metadata_google.cloud.bigquery.storage.v1.json b/samples/generated_samples/snippet_metadata_google.cloud.bigquery.storage.v1.json new file mode 100644 index 00000000..c1d15715 --- /dev/null +++ b/samples/generated_samples/snippet_metadata_google.cloud.bigquery.storage.v1.json @@ -0,0 +1,1480 @@ +{ + "clientLibrary": { + "apis": [ + { + "id": "google.cloud.bigquery.storage.v1", + "version": "v1" + } + ], + "language": "PYTHON", + "name": "google-cloud-bigquery-storage", + "version": "0.1.0" + }, + "snippets": [ + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigquery_storage_v1.BigQueryReadAsyncClient", + "shortName": "BigQueryReadAsyncClient" + }, + "fullName": "google.cloud.bigquery_storage_v1.BigQueryReadAsyncClient.create_read_session", + "method": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead.CreateReadSession", + "service": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead", + "shortName": "BigQueryRead" + }, + "shortName": "CreateReadSession" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1.types.CreateReadSessionRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "read_session", + "type": "google.cloud.bigquery_storage_v1.types.ReadSession" + }, + { + "name": "max_stream_count", + "type": "int" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.bigquery_storage_v1.types.ReadSession", + "shortName": "create_read_session" + }, + "description": "Sample for CreateReadSession", + "file": "bigquerystorage_v1_generated_big_query_read_create_read_session_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1_generated_BigQueryRead_CreateReadSession_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1_generated_big_query_read_create_read_session_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigquery_storage_v1.BigQueryReadClient", + "shortName": "BigQueryReadClient" + }, + "fullName": 
"google.cloud.bigquery_storage_v1.BigQueryReadClient.create_read_session", + "method": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead.CreateReadSession", + "service": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead", + "shortName": "BigQueryRead" + }, + "shortName": "CreateReadSession" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1.types.CreateReadSessionRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "read_session", + "type": "google.cloud.bigquery_storage_v1.types.ReadSession" + }, + { + "name": "max_stream_count", + "type": "int" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.bigquery_storage_v1.types.ReadSession", + "shortName": "create_read_session" + }, + "description": "Sample for CreateReadSession", + "file": "bigquerystorage_v1_generated_big_query_read_create_read_session_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1_generated_BigQueryRead_CreateReadSession_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1_generated_big_query_read_create_read_session_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigquery_storage_v1.BigQueryReadAsyncClient", + "shortName": "BigQueryReadAsyncClient" + }, + "fullName": "google.cloud.bigquery_storage_v1.BigQueryReadAsyncClient.read_rows", + "method": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead.ReadRows", + "service": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead", + "shortName": "BigQueryRead" + }, + "shortName": "ReadRows" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1.types.ReadRowsRequest" + }, + { + "name": "read_stream", + "type": "str" + }, + { + "name": "offset", + "type": "int" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.bigquery_storage_v1.types.ReadRowsResponse]", + "shortName": "read_rows" + }, + "description": "Sample for ReadRows", + "file": "bigquerystorage_v1_generated_big_query_read_read_rows_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1_generated_BigQueryRead_ReadRows_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1_generated_big_query_read_read_rows_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": 
"google.cloud.bigquery_storage_v1.BigQueryReadClient", + "shortName": "BigQueryReadClient" + }, + "fullName": "google.cloud.bigquery_storage_v1.BigQueryReadClient.read_rows", + "method": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead.ReadRows", + "service": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead", + "shortName": "BigQueryRead" + }, + "shortName": "ReadRows" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1.types.ReadRowsRequest" + }, + { + "name": "read_stream", + "type": "str" + }, + { + "name": "offset", + "type": "int" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.bigquery_storage_v1.types.ReadRowsResponse]", + "shortName": "read_rows" + }, + "description": "Sample for ReadRows", + "file": "bigquerystorage_v1_generated_big_query_read_read_rows_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1_generated_BigQueryRead_ReadRows_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1_generated_big_query_read_read_rows_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigquery_storage_v1.BigQueryReadAsyncClient", + "shortName": "BigQueryReadAsyncClient" + }, + "fullName": "google.cloud.bigquery_storage_v1.BigQueryReadAsyncClient.split_read_stream", + "method": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead.SplitReadStream", + "service": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead", + "shortName": "BigQueryRead" + }, + "shortName": "SplitReadStream" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1.types.SplitReadStreamRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.bigquery_storage_v1.types.SplitReadStreamResponse", + "shortName": "split_read_stream" + }, + "description": "Sample for SplitReadStream", + "file": "bigquerystorage_v1_generated_big_query_read_split_read_stream_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1_generated_BigQueryRead_SplitReadStream_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1_generated_big_query_read_split_read_stream_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigquery_storage_v1.BigQueryReadClient", + "shortName": "BigQueryReadClient" + }, + 
"fullName": "google.cloud.bigquery_storage_v1.BigQueryReadClient.split_read_stream", + "method": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead.SplitReadStream", + "service": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryRead", + "shortName": "BigQueryRead" + }, + "shortName": "SplitReadStream" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1.types.SplitReadStreamRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.bigquery_storage_v1.types.SplitReadStreamResponse", + "shortName": "split_read_stream" + }, + "description": "Sample for SplitReadStream", + "file": "bigquerystorage_v1_generated_big_query_read_split_read_stream_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1_generated_BigQueryRead_SplitReadStream_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1_generated_big_query_read_split_read_stream_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigquery_storage_v1.BigQueryWriteAsyncClient", + "shortName": "BigQueryWriteAsyncClient" + }, + "fullName": "google.cloud.bigquery_storage_v1.BigQueryWriteAsyncClient.append_rows", + "method": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.AppendRows", + "service": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite", + "shortName": "BigQueryWrite" + }, + "shortName": "AppendRows" + }, + "parameters": [ + { + "name": "requests", + "type": "Iterator[google.cloud.bigquery_storage_v1.types.AppendRowsRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.bigquery_storage_v1.types.AppendRowsResponse]", + "shortName": "append_rows" + }, + "description": "Sample for AppendRows", + "file": "bigquerystorage_v1_generated_big_query_write_append_rows_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_AppendRows_async", + "segments": [ + { + "end": 62, + "start": 27, + "type": "FULL" + }, + { + "end": 62, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 55, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 58, + "start": 56, + "type": "REQUEST_EXECUTION" + }, + { + "end": 63, + "start": 59, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1_generated_big_query_write_append_rows_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigquery_storage_v1.BigQueryWriteClient", + "shortName": "BigQueryWriteClient" + }, + "fullName": "google.cloud.bigquery_storage_v1.BigQueryWriteClient.append_rows", + "method": { + "fullName": 
"google.cloud.bigquery.storage.v1.BigQueryWrite.AppendRows", + "service": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite", + "shortName": "BigQueryWrite" + }, + "shortName": "AppendRows" + }, + "parameters": [ + { + "name": "requests", + "type": "Iterator[google.cloud.bigquery_storage_v1.types.AppendRowsRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.bigquery_storage_v1.types.AppendRowsResponse]", + "shortName": "append_rows" + }, + "description": "Sample for AppendRows", + "file": "bigquerystorage_v1_generated_big_query_write_append_rows_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_AppendRows_sync", + "segments": [ + { + "end": 62, + "start": 27, + "type": "FULL" + }, + { + "end": 62, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 55, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 58, + "start": 56, + "type": "REQUEST_EXECUTION" + }, + { + "end": 63, + "start": 59, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1_generated_big_query_write_append_rows_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigquery_storage_v1.BigQueryWriteAsyncClient", + "shortName": "BigQueryWriteAsyncClient" + }, + "fullName": "google.cloud.bigquery_storage_v1.BigQueryWriteAsyncClient.batch_commit_write_streams", + "method": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.BatchCommitWriteStreams", + "service": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite", + "shortName": "BigQueryWrite" + }, + "shortName": "BatchCommitWriteStreams" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1.types.BatchCommitWriteStreamsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.bigquery_storage_v1.types.BatchCommitWriteStreamsResponse", + "shortName": "batch_commit_write_streams" + }, + "description": "Sample for BatchCommitWriteStreams", + "file": "bigquerystorage_v1_generated_big_query_write_batch_commit_write_streams_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_BatchCommitWriteStreams_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1_generated_big_query_write_batch_commit_write_streams_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigquery_storage_v1.BigQueryWriteClient", + "shortName": "BigQueryWriteClient" + }, + "fullName": "google.cloud.bigquery_storage_v1.BigQueryWriteClient.batch_commit_write_streams", + "method": { + "fullName": 
"google.cloud.bigquery.storage.v1.BigQueryWrite.BatchCommitWriteStreams", + "service": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite", + "shortName": "BigQueryWrite" + }, + "shortName": "BatchCommitWriteStreams" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1.types.BatchCommitWriteStreamsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.bigquery_storage_v1.types.BatchCommitWriteStreamsResponse", + "shortName": "batch_commit_write_streams" + }, + "description": "Sample for BatchCommitWriteStreams", + "file": "bigquerystorage_v1_generated_big_query_write_batch_commit_write_streams_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_BatchCommitWriteStreams_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1_generated_big_query_write_batch_commit_write_streams_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigquery_storage_v1.BigQueryWriteAsyncClient", + "shortName": "BigQueryWriteAsyncClient" + }, + "fullName": "google.cloud.bigquery_storage_v1.BigQueryWriteAsyncClient.create_write_stream", + "method": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.CreateWriteStream", + "service": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite", + "shortName": "BigQueryWrite" + }, + "shortName": "CreateWriteStream" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1.types.CreateWriteStreamRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "write_stream", + "type": "google.cloud.bigquery_storage_v1.types.WriteStream" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.bigquery_storage_v1.types.WriteStream", + "shortName": "create_write_stream" + }, + "description": "Sample for CreateWriteStream", + "file": "bigquerystorage_v1_generated_big_query_write_create_write_stream_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_CreateWriteStream_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1_generated_big_query_write_create_write_stream_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigquery_storage_v1.BigQueryWriteClient", + 
"shortName": "BigQueryWriteClient" + }, + "fullName": "google.cloud.bigquery_storage_v1.BigQueryWriteClient.create_write_stream", + "method": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.CreateWriteStream", + "service": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite", + "shortName": "BigQueryWrite" + }, + "shortName": "CreateWriteStream" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1.types.CreateWriteStreamRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "write_stream", + "type": "google.cloud.bigquery_storage_v1.types.WriteStream" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.bigquery_storage_v1.types.WriteStream", + "shortName": "create_write_stream" + }, + "description": "Sample for CreateWriteStream", + "file": "bigquerystorage_v1_generated_big_query_write_create_write_stream_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_CreateWriteStream_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1_generated_big_query_write_create_write_stream_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigquery_storage_v1.BigQueryWriteAsyncClient", + "shortName": "BigQueryWriteAsyncClient" + }, + "fullName": "google.cloud.bigquery_storage_v1.BigQueryWriteAsyncClient.finalize_write_stream", + "method": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.FinalizeWriteStream", + "service": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite", + "shortName": "BigQueryWrite" + }, + "shortName": "FinalizeWriteStream" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1.types.FinalizeWriteStreamRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.bigquery_storage_v1.types.FinalizeWriteStreamResponse", + "shortName": "finalize_write_stream" + }, + "description": "Sample for FinalizeWriteStream", + "file": "bigquerystorage_v1_generated_big_query_write_finalize_write_stream_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_FinalizeWriteStream_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1_generated_big_query_write_finalize_write_stream_async.py" + }, + { + "canonical": 
true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigquery_storage_v1.BigQueryWriteClient", + "shortName": "BigQueryWriteClient" + }, + "fullName": "google.cloud.bigquery_storage_v1.BigQueryWriteClient.finalize_write_stream", + "method": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.FinalizeWriteStream", + "service": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite", + "shortName": "BigQueryWrite" + }, + "shortName": "FinalizeWriteStream" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1.types.FinalizeWriteStreamRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.bigquery_storage_v1.types.FinalizeWriteStreamResponse", + "shortName": "finalize_write_stream" + }, + "description": "Sample for FinalizeWriteStream", + "file": "bigquerystorage_v1_generated_big_query_write_finalize_write_stream_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_FinalizeWriteStream_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1_generated_big_query_write_finalize_write_stream_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigquery_storage_v1.BigQueryWriteAsyncClient", + "shortName": "BigQueryWriteAsyncClient" + }, + "fullName": "google.cloud.bigquery_storage_v1.BigQueryWriteAsyncClient.flush_rows", + "method": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.FlushRows", + "service": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite", + "shortName": "BigQueryWrite" + }, + "shortName": "FlushRows" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1.types.FlushRowsRequest" + }, + { + "name": "write_stream", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.bigquery_storage_v1.types.FlushRowsResponse", + "shortName": "flush_rows" + }, + "description": "Sample for FlushRows", + "file": "bigquerystorage_v1_generated_big_query_write_flush_rows_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_FlushRows_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1_generated_big_query_write_flush_rows_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + 
"fullName": "google.cloud.bigquery_storage_v1.BigQueryWriteClient", + "shortName": "BigQueryWriteClient" + }, + "fullName": "google.cloud.bigquery_storage_v1.BigQueryWriteClient.flush_rows", + "method": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.FlushRows", + "service": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite", + "shortName": "BigQueryWrite" + }, + "shortName": "FlushRows" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1.types.FlushRowsRequest" + }, + { + "name": "write_stream", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.bigquery_storage_v1.types.FlushRowsResponse", + "shortName": "flush_rows" + }, + "description": "Sample for FlushRows", + "file": "bigquerystorage_v1_generated_big_query_write_flush_rows_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_FlushRows_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1_generated_big_query_write_flush_rows_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigquery_storage_v1.BigQueryWriteAsyncClient", + "shortName": "BigQueryWriteAsyncClient" + }, + "fullName": "google.cloud.bigquery_storage_v1.BigQueryWriteAsyncClient.get_write_stream", + "method": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.GetWriteStream", + "service": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite", + "shortName": "BigQueryWrite" + }, + "shortName": "GetWriteStream" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1.types.GetWriteStreamRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.bigquery_storage_v1.types.WriteStream", + "shortName": "get_write_stream" + }, + "description": "Sample for GetWriteStream", + "file": "bigquerystorage_v1_generated_big_query_write_get_write_stream_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_GetWriteStream_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1_generated_big_query_write_get_write_stream_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigquery_storage_v1.BigQueryWriteClient", + "shortName": "BigQueryWriteClient" + }, 
+ "fullName": "google.cloud.bigquery_storage_v1.BigQueryWriteClient.get_write_stream", + "method": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite.GetWriteStream", + "service": { + "fullName": "google.cloud.bigquery.storage.v1.BigQueryWrite", + "shortName": "BigQueryWrite" + }, + "shortName": "GetWriteStream" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1.types.GetWriteStreamRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.bigquery_storage_v1.types.WriteStream", + "shortName": "get_write_stream" + }, + "description": "Sample for GetWriteStream", + "file": "bigquerystorage_v1_generated_big_query_write_get_write_stream_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1_generated_BigQueryWrite_GetWriteStream_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1_generated_big_query_write_get_write_stream_sync.py" + } + ] +} diff --git a/samples/generated_samples/snippet_metadata_google.cloud.bigquery.storage.v1beta2.json b/samples/generated_samples/snippet_metadata_google.cloud.bigquery.storage.v1beta2.json new file mode 100644 index 00000000..302b7815 --- /dev/null +++ b/samples/generated_samples/snippet_metadata_google.cloud.bigquery.storage.v1beta2.json @@ -0,0 +1,1480 @@ +{ + "clientLibrary": { + "apis": [ + { + "id": "google.cloud.bigquery.storage.v1beta2", + "version": "v1beta2" + } + ], + "language": "PYTHON", + "name": "google-cloud-bigquery-storage", + "version": "0.1.0" + }, + "snippets": [ + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryReadAsyncClient", + "shortName": "BigQueryReadAsyncClient" + }, + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryReadAsyncClient.create_read_session", + "method": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryRead.CreateReadSession", + "service": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryRead", + "shortName": "BigQueryRead" + }, + "shortName": "CreateReadSession" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1beta2.types.CreateReadSessionRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "read_session", + "type": "google.cloud.bigquery_storage_v1beta2.types.ReadSession" + }, + { + "name": "max_stream_count", + "type": "int" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.bigquery_storage_v1beta2.types.ReadSession", + "shortName": "create_read_session" + }, + "description": "Sample for CreateReadSession", + "file": "bigquerystorage_v1beta2_generated_big_query_read_create_read_session_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"bigquerystorage_v1beta2_generated_BigQueryRead_CreateReadSession_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1beta2_generated_big_query_read_create_read_session_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryReadClient", + "shortName": "BigQueryReadClient" + }, + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryReadClient.create_read_session", + "method": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryRead.CreateReadSession", + "service": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryRead", + "shortName": "BigQueryRead" + }, + "shortName": "CreateReadSession" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1beta2.types.CreateReadSessionRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "read_session", + "type": "google.cloud.bigquery_storage_v1beta2.types.ReadSession" + }, + { + "name": "max_stream_count", + "type": "int" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.bigquery_storage_v1beta2.types.ReadSession", + "shortName": "create_read_session" + }, + "description": "Sample for CreateReadSession", + "file": "bigquerystorage_v1beta2_generated_big_query_read_create_read_session_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1beta2_generated_BigQueryRead_CreateReadSession_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1beta2_generated_big_query_read_create_read_session_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryReadAsyncClient", + "shortName": "BigQueryReadAsyncClient" + }, + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryReadAsyncClient.read_rows", + "method": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryRead.ReadRows", + "service": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryRead", + "shortName": "BigQueryRead" + }, + "shortName": "ReadRows" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1beta2.types.ReadRowsRequest" + }, + { + "name": "read_stream", + "type": "str" + }, + { + "name": "offset", + "type": "int" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.bigquery_storage_v1beta2.types.ReadRowsResponse]", + 
"shortName": "read_rows" + }, + "description": "Sample for ReadRows", + "file": "bigquerystorage_v1beta2_generated_big_query_read_read_rows_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1beta2_generated_BigQueryRead_ReadRows_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1beta2_generated_big_query_read_read_rows_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryReadClient", + "shortName": "BigQueryReadClient" + }, + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryReadClient.read_rows", + "method": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryRead.ReadRows", + "service": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryRead", + "shortName": "BigQueryRead" + }, + "shortName": "ReadRows" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1beta2.types.ReadRowsRequest" + }, + { + "name": "read_stream", + "type": "str" + }, + { + "name": "offset", + "type": "int" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.bigquery_storage_v1beta2.types.ReadRowsResponse]", + "shortName": "read_rows" + }, + "description": "Sample for ReadRows", + "file": "bigquerystorage_v1beta2_generated_big_query_read_read_rows_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1beta2_generated_BigQueryRead_ReadRows_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1beta2_generated_big_query_read_read_rows_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryReadAsyncClient", + "shortName": "BigQueryReadAsyncClient" + }, + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryReadAsyncClient.split_read_stream", + "method": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryRead.SplitReadStream", + "service": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryRead", + "shortName": "BigQueryRead" + }, + "shortName": "SplitReadStream" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1beta2.types.SplitReadStreamRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.bigquery_storage_v1beta2.types.SplitReadStreamResponse", + "shortName": "split_read_stream" + }, + "description": 
"Sample for SplitReadStream", + "file": "bigquerystorage_v1beta2_generated_big_query_read_split_read_stream_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1beta2_generated_BigQueryRead_SplitReadStream_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1beta2_generated_big_query_read_split_read_stream_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryReadClient", + "shortName": "BigQueryReadClient" + }, + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryReadClient.split_read_stream", + "method": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryRead.SplitReadStream", + "service": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryRead", + "shortName": "BigQueryRead" + }, + "shortName": "SplitReadStream" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1beta2.types.SplitReadStreamRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.bigquery_storage_v1beta2.types.SplitReadStreamResponse", + "shortName": "split_read_stream" + }, + "description": "Sample for SplitReadStream", + "file": "bigquerystorage_v1beta2_generated_big_query_read_split_read_stream_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1beta2_generated_BigQueryRead_SplitReadStream_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1beta2_generated_big_query_read_split_read_stream_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryWriteAsyncClient", + "shortName": "BigQueryWriteAsyncClient" + }, + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryWriteAsyncClient.append_rows", + "method": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite.AppendRows", + "service": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite", + "shortName": "BigQueryWrite" + }, + "shortName": "AppendRows" + }, + "parameters": [ + { + "name": "requests", + "type": "Iterator[google.cloud.bigquery_storage_v1beta2.types.AppendRowsRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.bigquery_storage_v1beta2.types.AppendRowsResponse]", + "shortName": "append_rows" + }, + "description": "Sample for AppendRows", + "file": 
"bigquerystorage_v1beta2_generated_big_query_write_append_rows_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1beta2_generated_BigQueryWrite_AppendRows_async", + "segments": [ + { + "end": 62, + "start": 27, + "type": "FULL" + }, + { + "end": 62, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 55, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 58, + "start": 56, + "type": "REQUEST_EXECUTION" + }, + { + "end": 63, + "start": 59, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1beta2_generated_big_query_write_append_rows_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryWriteClient", + "shortName": "BigQueryWriteClient" + }, + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryWriteClient.append_rows", + "method": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite.AppendRows", + "service": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite", + "shortName": "BigQueryWrite" + }, + "shortName": "AppendRows" + }, + "parameters": [ + { + "name": "requests", + "type": "Iterator[google.cloud.bigquery_storage_v1beta2.types.AppendRowsRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.bigquery_storage_v1beta2.types.AppendRowsResponse]", + "shortName": "append_rows" + }, + "description": "Sample for AppendRows", + "file": "bigquerystorage_v1beta2_generated_big_query_write_append_rows_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1beta2_generated_BigQueryWrite_AppendRows_sync", + "segments": [ + { + "end": 62, + "start": 27, + "type": "FULL" + }, + { + "end": 62, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 55, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 58, + "start": 56, + "type": "REQUEST_EXECUTION" + }, + { + "end": 63, + "start": 59, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1beta2_generated_big_query_write_append_rows_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryWriteAsyncClient", + "shortName": "BigQueryWriteAsyncClient" + }, + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryWriteAsyncClient.batch_commit_write_streams", + "method": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite.BatchCommitWriteStreams", + "service": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite", + "shortName": "BigQueryWrite" + }, + "shortName": "BatchCommitWriteStreams" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1beta2.types.BatchCommitWriteStreamsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.bigquery_storage_v1beta2.types.BatchCommitWriteStreamsResponse", + "shortName": "batch_commit_write_streams" + }, + "description": "Sample for 
BatchCommitWriteStreams", + "file": "bigquerystorage_v1beta2_generated_big_query_write_batch_commit_write_streams_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1beta2_generated_BigQueryWrite_BatchCommitWriteStreams_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1beta2_generated_big_query_write_batch_commit_write_streams_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryWriteClient", + "shortName": "BigQueryWriteClient" + }, + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryWriteClient.batch_commit_write_streams", + "method": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite.BatchCommitWriteStreams", + "service": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite", + "shortName": "BigQueryWrite" + }, + "shortName": "BatchCommitWriteStreams" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1beta2.types.BatchCommitWriteStreamsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.bigquery_storage_v1beta2.types.BatchCommitWriteStreamsResponse", + "shortName": "batch_commit_write_streams" + }, + "description": "Sample for BatchCommitWriteStreams", + "file": "bigquerystorage_v1beta2_generated_big_query_write_batch_commit_write_streams_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1beta2_generated_BigQueryWrite_BatchCommitWriteStreams_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1beta2_generated_big_query_write_batch_commit_write_streams_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryWriteAsyncClient", + "shortName": "BigQueryWriteAsyncClient" + }, + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryWriteAsyncClient.create_write_stream", + "method": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite.CreateWriteStream", + "service": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite", + "shortName": "BigQueryWrite" + }, + "shortName": "CreateWriteStream" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1beta2.types.CreateWriteStreamRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "write_stream", + "type": "google.cloud.bigquery_storage_v1beta2.types.WriteStream" + }, + { + "name": "retry", + "type": 
"google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.bigquery_storage_v1beta2.types.WriteStream", + "shortName": "create_write_stream" + }, + "description": "Sample for CreateWriteStream", + "file": "bigquerystorage_v1beta2_generated_big_query_write_create_write_stream_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1beta2_generated_BigQueryWrite_CreateWriteStream_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1beta2_generated_big_query_write_create_write_stream_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryWriteClient", + "shortName": "BigQueryWriteClient" + }, + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryWriteClient.create_write_stream", + "method": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite.CreateWriteStream", + "service": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite", + "shortName": "BigQueryWrite" + }, + "shortName": "CreateWriteStream" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1beta2.types.CreateWriteStreamRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "write_stream", + "type": "google.cloud.bigquery_storage_v1beta2.types.WriteStream" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.bigquery_storage_v1beta2.types.WriteStream", + "shortName": "create_write_stream" + }, + "description": "Sample for CreateWriteStream", + "file": "bigquerystorage_v1beta2_generated_big_query_write_create_write_stream_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1beta2_generated_BigQueryWrite_CreateWriteStream_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1beta2_generated_big_query_write_create_write_stream_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryWriteAsyncClient", + "shortName": "BigQueryWriteAsyncClient" + }, + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryWriteAsyncClient.finalize_write_stream", + "method": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite.FinalizeWriteStream", + "service": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite", + "shortName": "BigQueryWrite" + }, + "shortName": "FinalizeWriteStream" + }, + "parameters": [ + { + 
"name": "request", + "type": "google.cloud.bigquery_storage_v1beta2.types.FinalizeWriteStreamRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.bigquery_storage_v1beta2.types.FinalizeWriteStreamResponse", + "shortName": "finalize_write_stream" + }, + "description": "Sample for FinalizeWriteStream", + "file": "bigquerystorage_v1beta2_generated_big_query_write_finalize_write_stream_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1beta2_generated_BigQueryWrite_FinalizeWriteStream_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1beta2_generated_big_query_write_finalize_write_stream_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryWriteClient", + "shortName": "BigQueryWriteClient" + }, + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryWriteClient.finalize_write_stream", + "method": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite.FinalizeWriteStream", + "service": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite", + "shortName": "BigQueryWrite" + }, + "shortName": "FinalizeWriteStream" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1beta2.types.FinalizeWriteStreamRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.bigquery_storage_v1beta2.types.FinalizeWriteStreamResponse", + "shortName": "finalize_write_stream" + }, + "description": "Sample for FinalizeWriteStream", + "file": "bigquerystorage_v1beta2_generated_big_query_write_finalize_write_stream_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1beta2_generated_BigQueryWrite_FinalizeWriteStream_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1beta2_generated_big_query_write_finalize_write_stream_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryWriteAsyncClient", + "shortName": "BigQueryWriteAsyncClient" + }, + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryWriteAsyncClient.flush_rows", + "method": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite.FlushRows", + "service": { + "fullName": 
"google.cloud.bigquery.storage.v1beta2.BigQueryWrite", + "shortName": "BigQueryWrite" + }, + "shortName": "FlushRows" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1beta2.types.FlushRowsRequest" + }, + { + "name": "write_stream", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.bigquery_storage_v1beta2.types.FlushRowsResponse", + "shortName": "flush_rows" + }, + "description": "Sample for FlushRows", + "file": "bigquerystorage_v1beta2_generated_big_query_write_flush_rows_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1beta2_generated_BigQueryWrite_FlushRows_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1beta2_generated_big_query_write_flush_rows_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryWriteClient", + "shortName": "BigQueryWriteClient" + }, + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryWriteClient.flush_rows", + "method": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite.FlushRows", + "service": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite", + "shortName": "BigQueryWrite" + }, + "shortName": "FlushRows" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1beta2.types.FlushRowsRequest" + }, + { + "name": "write_stream", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.bigquery_storage_v1beta2.types.FlushRowsResponse", + "shortName": "flush_rows" + }, + "description": "Sample for FlushRows", + "file": "bigquerystorage_v1beta2_generated_big_query_write_flush_rows_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1beta2_generated_BigQueryWrite_FlushRows_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1beta2_generated_big_query_write_flush_rows_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryWriteAsyncClient", + "shortName": "BigQueryWriteAsyncClient" + }, + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryWriteAsyncClient.get_write_stream", + "method": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite.GetWriteStream", + "service": { + "fullName": 
"google.cloud.bigquery.storage.v1beta2.BigQueryWrite", + "shortName": "BigQueryWrite" + }, + "shortName": "GetWriteStream" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1beta2.types.GetWriteStreamRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.bigquery_storage_v1beta2.types.WriteStream", + "shortName": "get_write_stream" + }, + "description": "Sample for GetWriteStream", + "file": "bigquerystorage_v1beta2_generated_big_query_write_get_write_stream_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1beta2_generated_BigQueryWrite_GetWriteStream_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1beta2_generated_big_query_write_get_write_stream_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryWriteClient", + "shortName": "BigQueryWriteClient" + }, + "fullName": "google.cloud.bigquery_storage_v1beta2.BigQueryWriteClient.get_write_stream", + "method": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite.GetWriteStream", + "service": { + "fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite", + "shortName": "BigQueryWrite" + }, + "shortName": "GetWriteStream" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.bigquery_storage_v1beta2.types.GetWriteStreamRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.bigquery_storage_v1beta2.types.WriteStream", + "shortName": "get_write_stream" + }, + "description": "Sample for GetWriteStream", + "file": "bigquerystorage_v1beta2_generated_big_query_write_get_write_stream_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "bigquerystorage_v1beta2_generated_BigQueryWrite_GetWriteStream_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "bigquerystorage_v1beta2_generated_big_query_write_get_write_stream_sync.py" + } + ] +} diff --git a/scripts/fixup_bigquery_storage_v1_keywords.py b/scripts/fixup_bigquery_storage_v1_keywords.py index 47599a5d..893b9806 100644 --- a/scripts/fixup_bigquery_storage_v1_keywords.py +++ b/scripts/fixup_bigquery_storage_v1_keywords.py @@ -39,7 +39,7 @@ def partition( class bigquery_storageCallTransformer(cst.CSTTransformer): CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') 
     METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
-        'append_rows': ('write_stream', 'offset', 'proto_rows', 'trace_id', ),
+        'append_rows': ('write_stream', 'offset', 'proto_rows', 'trace_id', 'missing_value_interpretations', ),
         'batch_commit_write_streams': ('parent', 'write_streams', ),
         'create_read_session': ('parent', 'read_session', 'max_stream_count', 'preferred_min_stream_count', ),
         'create_write_stream': ('parent', 'write_stream', ),
diff --git a/testing/constraints-3.10.txt b/testing/constraints-3.10.txt
index e69de29b..ed7f9aed 100644
--- a/testing/constraints-3.10.txt
+++ b/testing/constraints-3.10.txt
@@ -0,0 +1,6 @@
+# -*- coding: utf-8 -*-
+# This constraints file is required for unit tests.
+# List all library dependencies and extras in this file.
+google-api-core
+proto-plus
+protobuf
diff --git a/testing/constraints-3.11.txt b/testing/constraints-3.11.txt
index e69de29b..ed7f9aed 100644
--- a/testing/constraints-3.11.txt
+++ b/testing/constraints-3.11.txt
@@ -0,0 +1,6 @@
+# -*- coding: utf-8 -*-
+# This constraints file is required for unit tests.
+# List all library dependencies and extras in this file.
+google-api-core
+proto-plus
+protobuf
diff --git a/testing/constraints-3.7.txt b/testing/constraints-3.7.txt
index ccfc5ef5..6f3158cc 100644
--- a/testing/constraints-3.7.txt
+++ b/testing/constraints-3.7.txt
@@ -1,14 +1,9 @@
 # This constraints file is used to check that lower bounds
 # are correct in setup.py
-# List *all* library dependencies and extras in this file.
+# List all library dependencies and extras in this file.
 # Pin the version to the lower bound.
-#
-# e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev",
-# Then this file should have foo==1.14.0
-google-api-core==1.32.0
+# e.g., if setup.py has "google-cloud-foo >= 1.14.0, < 2.0.0dev",
+# Then this file should have google-cloud-foo==1.14.0
+google-api-core==1.33.2
 proto-plus==1.22.0
-libcst==0.2.5
-fastavro==0.21.2
-pandas==1.0.5
-pyarrow==0.15.0
 protobuf==3.19.5
diff --git a/testing/constraints-3.8.txt b/testing/constraints-3.8.txt
index e69de29b..ed7f9aed 100644
--- a/testing/constraints-3.8.txt
+++ b/testing/constraints-3.8.txt
@@ -0,0 +1,6 @@
+# -*- coding: utf-8 -*-
+# This constraints file is required for unit tests.
+# List all library dependencies and extras in this file.
+google-api-core
+proto-plus
+protobuf
diff --git a/testing/constraints-3.9.txt b/testing/constraints-3.9.txt
index e69de29b..ed7f9aed 100644
--- a/testing/constraints-3.9.txt
+++ b/testing/constraints-3.9.txt
@@ -0,0 +1,6 @@
+# -*- coding: utf-8 -*-
+# This constraints file is required for unit tests.
+# List all library dependencies and extras in this file.
+google-api-core
+proto-plus
+protobuf
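
Both snippet-metadata files added above share one schema: each entry in "snippets" binds a client method ("clientMethod.fullName") to a generated sample file ("file") plus line-numbered "segments" (FULL, SHORT, CLIENT_INITIALIZATION, REQUEST_INITIALIZATION, REQUEST_EXECUTION, RESPONSE_HANDLING). As a hedged sketch of how tooling might consume that layout — the relative path assumes the script runs from samples/generated_samples/, which is an assumption, not part of this change:

# Sketch: index the generated snippet metadata by RPC so tooling can locate
# the matching sample file. Relies only on the JSON shape shown in this diff;
# the file path (relative to samples/generated_samples/) is an assumption.
import json

with open("snippet_metadata_google.cloud.bigquery.storage.v1.json") as f:
    metadata = json.load(f)

for snippet in metadata["snippets"]:
    method = snippet["clientMethod"]["method"]["fullName"]
    flavor = "async" if snippet["clientMethod"].get("async") else "sync"
    # e.g. google.cloud.bigquery.storage.v1.BigQueryWrite.AppendRows (sync): ...
    print(f"{method} ({flavor}): {snippet['file']}")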
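
The updated keyword map in scripts/fixup_bigquery_storage_v1_keywords.py registers the new missing_value_interpretations argument of append_rows, matching the map field added to the v1 AppendRowsRequest proto. A minimal sketch of setting it — the project, dataset, table, and column names are placeholder assumptions, not values from this diff:

# Sketch (assumed names): per-column handling of values that are absent from
# the appended rows, via the new AppendRowsRequest.missing_value_interpretations
# map. Keys are column names; values are MissingValueInterpretation enum members.
from google.cloud.bigquery_storage_v1 import types

request = types.AppendRowsRequest(
    # "_default" targets the table's default write stream.
    write_stream="projects/my-project/datasets/my_dataset/tables/my_table/_default",
    missing_value_interpretations={
        "created_at": types.AppendRowsRequest.MissingValueInterpretation.DEFAULT_VALUE,
        "note": types.AppendRowsRequest.MissingValueInterpretation.NULL_VALUE,
    },
)

The fixup script itself exists to migrate positional call sites to keyword arguments across a source tree; as generated, these scripts expose --input-directory and --output-directory flags for that rewrite.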