From 73a07d4f3afc55d87109f20967cdc51f7c188962 Mon Sep 17 00:00:00 2001 From: Gurov Ilya Date: Sat, 26 Oct 2019 11:20:23 +0300 Subject: [PATCH] refactor(bigquery): rewrite docs in Google style, part 2 (#9481) towards issue #9092 --- bigquery/google/cloud/bigquery/_helpers.py | 83 ++- .../google/cloud/bigquery/_pandas_helpers.py | 9 +- bigquery/google/cloud/bigquery/client.py | 512 ++++++++------- .../google/cloud/bigquery/dbapi/_helpers.py | 55 +- .../google/cloud/bigquery/dbapi/cursor.py | 142 +++-- bigquery/google/cloud/bigquery/job.py | 595 +++++++++--------- bigquery/google/cloud/bigquery/query.py | 3 +- bigquery/google/cloud/bigquery/table.py | 10 +- 8 files changed, 701 insertions(+), 708 deletions(-) diff --git a/bigquery/google/cloud/bigquery/_helpers.py b/bigquery/google/cloud/bigquery/_helpers.py index bcb9d0696bc3..266bfc2c666c 100644 --- a/bigquery/google/cloud/bigquery/_helpers.py +++ b/bigquery/google/cloud/bigquery/_helpers.py @@ -90,12 +90,15 @@ def _timestamp_query_param_from_json(value, field): Args: value (str): The timestamp. - field (.SchemaField): The field corresponding to the value. + + field (google.cloud.bigquery.schema.SchemaField): + The field corresponding to the value. Returns: - Optional[datetime.datetime]: The parsed datetime object from - ``value`` if the ``field`` is not null (otherwise it is - :data:`None`). + Optional[datetime.datetime]: + The parsed datetime object from + ``value`` if the ``field`` is not null (otherwise it is + :data:`None`). """ if _not_null(value, field): # Canonical formats for timestamps in BigQuery are flexible. See: @@ -125,12 +128,14 @@ def _datetime_from_json(value, field): Args: value (str): The timestamp. - field (.SchemaField): The field corresponding to the value. + field (google.cloud.bigquery.schema.SchemaField): + The field corresponding to the value. Returns: - Optional[datetime.datetime]: The parsed datetime object from - ``value`` if the ``field`` is not null (otherwise it is - :data:`None`). + Optional[datetime.datetime]: + The parsed datetime object from + ``value`` if the ``field`` is not null (otherwise it is + :data:`None`). """ if _not_null(value, field): if "." in value: @@ -217,15 +222,12 @@ def _row_tuple_from_json(row, schema): Note: ``row['f']`` and ``schema`` are presumed to be of the same length. - :type row: dict - :param row: A JSON response row to be converted. - - :type schema: tuple - :param schema: A tuple of - :class:`~google.cloud.bigquery.schema.SchemaField`. + Args: + row (Dict): A JSON response row to be converted. + schema (Tuple): A tuple of :class:`~google.cloud.bigquery.schema.SchemaField`. - :rtype: tuple - :returns: A tuple of data converted to native types. + Returns: + Tuple: A tuple of data converted to native types. """ row_data = [] for field, cell in zip(schema, row["f"]): @@ -344,16 +346,13 @@ def _scalar_field_to_json(field, row_value): """Maps a field and value to a JSON-safe value. Args: - field ( \ - :class:`~google.cloud.bigquery.schema.SchemaField`, \ - ): + field (google.cloud.bigquery.schema.SchemaField): The SchemaField to use for type conversion and field name. - row_value (any): + row_value (Any): Value to be converted, based on the field's type. Returns: - any: - A JSON-serializable object. + Any: A JSON-serializable object. """ converter = _SCALAR_VALUE_TO_JSON_ROW.get(field.field_type) if converter is None: # STRING doesn't need converting @@ -365,17 +364,14 @@ def _repeated_field_to_json(field, row_value): """Convert a repeated/array field to its JSON representation. 
    Args:
-        field ( \
-            :class:`~google.cloud.bigquery.schema.SchemaField`, \
-        ):
+        field (google.cloud.bigquery.schema.SchemaField):
            The SchemaField to use for type conversion and field name. The
            field mode must equal ``REPEATED``.
-        row_value (Sequence[any]):
+        row_value (Sequence[Any]):
            A sequence of values to convert to JSON-serializable values.

    Returns:
-        List[any]:
-            A list of JSON-serializable objects.
+        List[Any]: A list of JSON-serializable objects.
    """
    # Remove the REPEATED, but keep the other fields. This allows us to process
    # each item as if it were a top-level field.
@@ -391,17 +387,14 @@ def _record_field_to_json(fields, row_value):
    """Convert a record/struct field to its JSON representation.

    Args:
-        fields ( \
-            Sequence[:class:`~google.cloud.bigquery.schema.SchemaField`], \
-        ):
+        fields (Sequence[google.cloud.bigquery.schema.SchemaField]):
            The :class:`~google.cloud.bigquery.schema.SchemaField`s of the
            record's subfields to use for type conversion and field names.
-        row_value (Union[Tuple[Any], Mapping[str, Any]):
+        row_value (Union[Tuple[Any], Mapping[str, Any]]):
            A tuple or dictionary to convert to JSON-serializable values.

    Returns:
-        Mapping[str, any]:
-            A JSON-serializable dictionary.
+        Mapping[str, Any]: A JSON-serializable dictionary.
    """
    record = {}
    isdict = isinstance(row_value, dict)
@@ -420,22 +413,16 @@ def _field_to_json(field, row_value):
    """Convert a field into JSON-serializable values.

    Args:
-        field ( \
-            :class:`~google.cloud.bigquery.schema.SchemaField`, \
-        ):
+        field (google.cloud.bigquery.schema.SchemaField):
            The SchemaField to use for type conversion and field name.

-        row_value (Union[ \
-            Sequence[list], \
-            any, \
-        ]):
+        row_value (Union[Sequence[List], Any]):
            Row data to be inserted. If the SchemaField's mode is
            REPEATED, assume this is a list. If not, the type
            is inferred from the SchemaField's field_type.

    Returns:
-        any:
-            A JSON-serializable object.
+        Any: A JSON-serializable object.
    """
    if row_value is None:
        return None
@@ -461,9 +448,9 @@ def _get_sub_prop(container, keys, default=None):
    This method works like ``dict.get(key)``, but for nested values.

    Arguments:
-        container (dict):
+        container (Dict):
            A dictionary which may contain other dictionaries as values.
-        keys (iterable):
+        keys (Iterable):
            A sequence of keys to attempt to get the value for. Each item in
            the sequence represents a deeper nesting. The first key is for
            the top level. If there is a dictionary there, the second key
@@ -504,9 +491,9 @@ def _set_sub_prop(container, keys, value):
    """Set a nested value in a dictionary.

    Arguments:
-        container (dict):
+        container (Dict):
            A dictionary which may contain other dictionaries as values.
-        keys (iterable):
+        keys (Iterable):
            A sequence of keys to attempt to set the value for. Each item in
            the sequence represents a deeper nesting. The first key is for
            the top level. If there is a dictionary there, the second key
@@ -547,9 +534,9 @@ def _del_sub_prop(container, keys):
-    """Remove a nested key fro a dictionary.
+    """Remove a nested key from a dictionary.

    Arguments:
-        container (dict):
+        container (Dict):
            A dictionary which may contain other dictionaries as values.
-        keys (iterable):
+        keys (Iterable):
            A sequence of keys to attempt to clear the value for. Each item in
            the sequence represents a deeper nesting. The first key is for
            the top level.
If there is a dictionary there, the second key diff --git a/bigquery/google/cloud/bigquery/_pandas_helpers.py b/bigquery/google/cloud/bigquery/_pandas_helpers.py index fc0010361f24..c7edf2ae51f5 100644 --- a/bigquery/google/cloud/bigquery/_pandas_helpers.py +++ b/bigquery/google/cloud/bigquery/_pandas_helpers.py @@ -130,7 +130,8 @@ def bq_to_arrow_struct_data_type(field): def bq_to_arrow_data_type(field): """Return the Arrow data type, corresponding to a given BigQuery column. - Returns None if default Arrow type inspection should be used. + Returns: + None: if default Arrow type inspection should be used. """ if field.mode is not None and field.mode.upper() == "REPEATED": inner_type = bq_to_arrow_data_type( @@ -152,7 +153,8 @@ def bq_to_arrow_data_type(field): def bq_to_arrow_field(bq_field): """Return the Arrow field, corresponding to a given BigQuery column. - Returns None if the Arrow type cannot be determined. + Returns: + None: if the Arrow type cannot be determined. """ arrow_type = bq_to_arrow_data_type(bq_field) if arrow_type: @@ -166,7 +168,8 @@ def bq_to_arrow_field(bq_field): def bq_to_arrow_schema(bq_schema): """Return the Arrow schema, corresponding to a given BigQuery schema. - Returns None if any Arrow type cannot be determined. + Returns: + None: if any Arrow type cannot be determined. """ arrow_fields = [] for bq_field in bq_schema: diff --git a/bigquery/google/cloud/bigquery/client.py b/bigquery/google/cloud/bigquery/client.py index e7810dbbd66a..02bfc651af0d 100644 --- a/bigquery/google/cloud/bigquery/client.py +++ b/bigquery/google/cloud/bigquery/client.py @@ -96,14 +96,12 @@ class Project(object): """Wrapper for resource describing a BigQuery project. - :type project_id: str - :param project_id: Opaque ID of the project + Args: + project_id (str): Opaque ID of the project - :type numeric_id: int - :param numeric_id: Numeric ID of the project + numeric_id (int): Numeric ID of the project - :type friendly_name: str - :param friendly_name: Display name of the project + friendly_name (str): Display name of the project """ def __init__(self, project_id, numeric_id, friendly_name): @@ -147,7 +145,7 @@ class Client(ClientWithProject): requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own library or partner tool. - client_options (Union[~google.api_core.client_options.ClientOptions, dict]): + client_options (Union[google.api_core.client_options.ClientOptions, Dict]): (Optional) Client options used to set user options on the client. API Endpoint should be set through client_options. @@ -231,25 +229,25 @@ def list_projects(self, max_results=None, page_token=None, retry=DEFAULT_RETRY): See https://cloud.google.com/bigquery/docs/reference/rest/v2/projects/list - :type max_results: int - :param max_results: (Optional) maximum number of projects to return, - If not passed, defaults to a value set by the API. - - :type page_token: str - :param page_token: - (Optional) Token representing a cursor into the projects. If - not passed, the API will return the first page of projects. - The token marks the beginning of the iterator to be returned - and the value of the ``page_token`` can be accessed at - ``next_page_token`` of the - :class:`~google.api_core.page_iterator.HTTPIterator`. - - :type retry: :class:`google.api_core.retry.Retry` - :param retry: (Optional) How to retry the RPC. 
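A short usage sketch of ``list_projects`` as documented above; the client assumes application-default credentials, and ``max_results`` is optional:

    from google.cloud import bigquery

    client = bigquery.Client()

    # Pages through the projects visible to the credentials; each item is a
    # Project with project_id, numeric_id, and friendly_name attributes.
    for project in client.list_projects(max_results=10):
        print(project.project_id, project.friendly_name)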
- - :rtype: :class:`~google.api_core.page_iterator.Iterator` - :returns: Iterator of :class:`~google.cloud.bigquery.client.Project` - accessible to the current client. + Args: + max_results (int): + (Optional) maximum number of projects to return, + If not passed, defaults to a value set by the API. + + page_token (str): + (Optional) Token representing a cursor into the projects. If + not passed, the API will return the first page of projects. + The token marks the beginning of the iterator to be returned + and the value of the ``page_token`` can be accessed at + ``next_page_token`` of the + :class:`~google.api_core.page_iterator.HTTPIterator`. + + retry (google.api_core.retry.Retry): (Optional) How to retry the RPC. + + Returns: + google.api_core.page_iterator.Iterator: + Iterator of :class:`~google.cloud.bigquery.client.Project` + accessible to the current client. """ return page_iterator.HTTPIterator( client=self, @@ -300,8 +298,7 @@ def list_datasets( Returns: google.api_core.page_iterator.Iterator: - Iterator of - :class:`~google.cloud.bigquery.dataset.DatasetListItem`. + Iterator of :class:`~google.cloud.bigquery.dataset.DatasetListItem`. associated with the project. """ extra_params = {} @@ -328,15 +325,16 @@ def list_datasets( def dataset(self, dataset_id, project=None): """Construct a reference to a dataset. - :type dataset_id: str - :param dataset_id: ID of the dataset. + Args: + dataset_id (str): ID of the dataset. - :type project: str - :param project: (Optional) project ID for the dataset (defaults to - the project of the client). + project (str): + (Optional) project ID for the dataset (defaults to + the project of the client). - :rtype: :class:`google.cloud.bigquery.dataset.DatasetReference` - :returns: a new ``DatasetReference`` instance + Returns: + google.cloud.bigquery.dataset.DatasetReference: + a new ``DatasetReference`` instance. """ if project is None: project = self.project @@ -351,8 +349,8 @@ def create_dataset(self, dataset, exists_ok=False, retry=DEFAULT_RETRY): Args: dataset (Union[ \ - :class:`~google.cloud.bigquery.dataset.Dataset`, \ - :class:`~google.cloud.bigquery.dataset.DatasetReference`, \ + google.cloud.bigquery.dataset.Dataset, \ + google.cloud.bigquery.dataset.DatasetReference, \ str, \ ]): A :class:`~google.cloud.bigquery.dataset.Dataset` to create. @@ -404,7 +402,7 @@ def create_routine(self, routine, exists_ok=False, retry=DEFAULT_RETRY): https://cloud.google.com/bigquery/docs/reference/rest/v2/routines/insert Args: - routine (:class:`~google.cloud.bigquery.routine.Routine`): + routine (google.cloud.bigquery.routine.Routine): A :class:`~google.cloud.bigquery.routine.Routine` to create. The dataset that the routine belongs to must already exist. exists_ok (bool): @@ -440,8 +438,8 @@ def create_table(self, table, exists_ok=False, retry=DEFAULT_RETRY): Args: table (Union[ \ - :class:`~google.cloud.bigquery.table.Table`, \ - :class:`~google.cloud.bigquery.table.TableReference`, \ + google.cloud.bigquery.table.Table, \ + google.cloud.bigquery.table.TableReference, \ str, \ ]): A :class:`~google.cloud.bigquery.table.Table` to create. @@ -481,14 +479,14 @@ def get_dataset(self, dataset_ref, retry=DEFAULT_RETRY): Args: dataset_ref (Union[ \ - :class:`~google.cloud.bigquery.dataset.DatasetReference`, \ + google.cloud.bigquery.dataset.DatasetReference, \ str, \ ]): A reference to the dataset to fetch from the BigQuery API. 
If a string is passed in, this method attempts to create a dataset reference from a string using :func:`~google.cloud.bigquery.dataset.DatasetReference.from_string`. - retry (:class:`google.api_core.retry.Retry`): + retry (google.api_core.retry.Retry): (Optional) How to retry the RPC. Returns: @@ -508,19 +506,18 @@ def get_model(self, model_ref, retry=DEFAULT_RETRY): Args: model_ref (Union[ \ - :class:`~google.cloud.bigquery.model.ModelReference`, \ + google.cloud.bigquery.model.ModelReference, \ str, \ ]): A reference to the model to fetch from the BigQuery API. If a string is passed in, this method attempts to create a model reference from a string using :func:`google.cloud.bigquery.model.ModelReference.from_string`. - retry (:class:`google.api_core.retry.Retry`): + retry (google.api_core.retry.Retry): (Optional) How to retry the RPC. Returns: - google.cloud.bigquery.model.Model: - A ``Model`` instance. + google.cloud.bigquery.model.Model: A ``Model`` instance. """ if isinstance(model_ref, str): model_ref = ModelReference.from_string( @@ -535,15 +532,15 @@ def get_routine(self, routine_ref, retry=DEFAULT_RETRY): Args: routine_ref (Union[ \ - :class:`~google.cloud.bigquery.routine.Routine`, \ - :class:`~google.cloud.bigquery.routine.RoutineReference`, \ + google.cloud.bigquery.routine.Routine, \ + google.cloud.bigquery.routine.RoutineReference, \ str, \ ]): A reference to the routine to fetch from the BigQuery API. If a string is passed in, this method attempts to create a reference from a string using :func:`google.cloud.bigquery.routine.RoutineReference.from_string`. - retry (:class:`google.api_core.retry.Retry`): + retry (google.api_core.retry.Retry): (Optional) How to retry the API call. Returns: @@ -563,15 +560,15 @@ def get_table(self, table, retry=DEFAULT_RETRY): Args: table (Union[ \ - :class:`~google.cloud.bigquery.table.Table`, \ - :class:`~google.cloud.bigquery.table.TableReference`, \ + google.cloud.bigquery.table.Table, \ + google.cloud.bigquery.table.TableReference, \ str, \ ]): A reference to the table to fetch from the BigQuery API. If a string is passed in, this method attempts to create a table reference from a string using :func:`google.cloud.bigquery.table.TableReference.from_string`. - retry (:class:`google.api_core.retry.Retry`): + retry (google.api_core.retry.Retry): (Optional) How to retry the RPC. Returns: @@ -744,8 +741,8 @@ def list_models( Args: dataset (Union[ \ - :class:`~google.cloud.bigquery.dataset.Dataset`, \ - :class:`~google.cloud.bigquery.dataset.DatasetReference`, \ + google.cloud.bigquery.dataset.Dataset, \ + google.cloud.bigquery.dataset.DatasetReference, \ str, \ ]): A reference to the dataset whose models to list from the @@ -762,7 +759,7 @@ def list_models( the value of the ``page_token`` can be accessed at ``next_page_token`` of the :class:`~google.api_core.page_iterator.HTTPIterator`. - retry (:class:`google.api_core.retry.Retry`): + retry (google.api_core.retry.Retry): (Optional) How to retry the RPC. Returns: @@ -802,8 +799,8 @@ def list_routines( Args: dataset (Union[ \ - :class:`~google.cloud.bigquery.dataset.Dataset`, \ - :class:`~google.cloud.bigquery.dataset.DatasetReference`, \ + google.cloud.bigquery.dataset.Dataset, \ + google.cloud.bigquery.dataset.DatasetReference, \ str, \ ]): A reference to the dataset whose routines to list from the @@ -820,7 +817,7 @@ def list_routines( the value of the ``page_token`` can be accessed at ``next_page_token`` of the :class:`~google.api_core.page_iterator.HTTPIterator`. 
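A minimal sketch of the ``create_dataset`` and ``create_table`` calls documented here; ``my_dataset`` and ``my_table`` are placeholder IDs in the client's default project:

    from google.cloud import bigquery

    client = bigquery.Client()

    # A plain string is expanded via DatasetReference.from_string.
    dataset = client.create_dataset("my_dataset", exists_ok=True)

    schema = [
        bigquery.SchemaField("full_name", "STRING", mode="REQUIRED"),
        bigquery.SchemaField("age", "INTEGER"),
    ]
    table = bigquery.Table(dataset.table("my_table"), schema=schema)
    table = client.create_table(table, exists_ok=True)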
- retry (:class:`google.api_core.retry.Retry`): + retry (google.api_core.retry.Retry): (Optional) How to retry the RPC. Returns: @@ -860,8 +857,8 @@ def list_tables( Args: dataset (Union[ \ - :class:`~google.cloud.bigquery.dataset.Dataset`, \ - :class:`~google.cloud.bigquery.dataset.DatasetReference`, \ + google.cloud.bigquery.dataset.Dataset, \ + google.cloud.bigquery.dataset.DatasetReference, \ str, \ ]): A reference to the dataset whose tables to list from the @@ -878,7 +875,7 @@ def list_tables( the value of the ``page_token`` can be accessed at ``next_page_token`` of the :class:`~google.api_core.page_iterator.HTTPIterator`. - retry (:class:`google.api_core.retry.Retry`): + retry (google.api_core.retry.Retry): (Optional) How to retry the RPC. Returns: @@ -918,8 +915,8 @@ def delete_dataset( Args dataset (Union[ \ - :class:`~google.cloud.bigquery.dataset.Dataset`, \ - :class:`~google.cloud.bigquery.dataset.DatasetReference`, \ + google.cloud.bigquery.dataset.Dataset, \ + google.cloud.bigquery.dataset.DatasetReference, \ str, \ ]): A reference to the dataset to delete. If a string is passed @@ -930,7 +927,7 @@ def delete_dataset( (Optional) If True, delete all the tables in the dataset. If False and the dataset contains tables, the request will fail. Default is False. - retry (:class:`google.api_core.retry.Retry`): + retry (google.api_core.retry.Retry): (Optional) How to retry the RPC. not_found_ok (bool): Defaults to ``False``. If ``True``, ignore "not found" errors @@ -964,15 +961,15 @@ def delete_model(self, model, retry=DEFAULT_RETRY, not_found_ok=False): Args: model (Union[ \ - :class:`~google.cloud.bigquery.model.Model`, \ - :class:`~google.cloud.bigquery.model.ModelReference`, \ + google.cloud.bigquery.model.Model, \ + google.cloud.bigquery.model.ModelReference, \ str, \ ]): A reference to the model to delete. If a string is passed in, this method attempts to create a model reference from a string using :func:`google.cloud.bigquery.model.ModelReference.from_string`. - retry (:class:`google.api_core.retry.Retry`): + retry (google.api_core.retry.Retry): (Optional) How to retry the RPC. not_found_ok (bool): Defaults to ``False``. If ``True``, ignore "not found" errors @@ -998,15 +995,15 @@ def delete_routine(self, routine, retry=DEFAULT_RETRY, not_found_ok=False): Args: model (Union[ \ - :class:`~google.cloud.bigquery.routine.Routine`, \ - :class:`~google.cloud.bigquery.routine.RoutineReference`, \ + google.cloud.bigquery.routine.Routine, \ + google.cloud.bigquery.routine.RoutineReference, \ str, \ ]): A reference to the routine to delete. If a string is passed in, this method attempts to create a routine reference from a string using :func:`google.cloud.bigquery.routine.RoutineReference.from_string`. - retry (:class:`google.api_core.retry.Retry`): + retry (google.api_core.retry.Retry): (Optional) How to retry the RPC. not_found_ok (bool): Defaults to ``False``. If ``True``, ignore "not found" errors @@ -1034,15 +1031,15 @@ def delete_table(self, table, retry=DEFAULT_RETRY, not_found_ok=False): Args: table (Union[ \ - :class:`~google.cloud.bigquery.table.Table`, \ - :class:`~google.cloud.bigquery.table.TableReference`, \ + google.cloud.bigquery.table.Table, \ + google.cloud.bigquery.table.TableReference, \ str, \ ]): A reference to the table to delete. If a string is passed in, this method attempts to create a table reference from a string using :func:`google.cloud.bigquery.table.TableReference.from_string`. 
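The listing and deletion methods documented above compose naturally; a sketch with placeholder IDs, using ``not_found_ok`` to keep the deletes idempotent:

    from google.cloud import bigquery

    client = bigquery.Client()

    # Lists tables in a dataset of the client's project.
    for table_item in client.list_tables("my_dataset"):
        print(table_item.table_id)

    client.delete_table("my_dataset.obsolete_table", not_found_ok=True)
    client.delete_dataset("my_dataset", delete_contents=True, not_found_ok=True)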
- retry (:class:`google.api_core.retry.Retry`): + retry (google.api_core.retry.Retry): (Optional) How to retry the RPC. not_found_ok (bool): Defaults to ``False``. If ``True``, ignore "not found" errors @@ -1107,15 +1104,17 @@ def _get_query_results( def job_from_resource(self, resource): """Detect correct job type from resource and instantiate. - :type resource: dict - :param resource: one job resource from API response + Args: + resource (Dict): one job resource from API response - :rtype: One of: - :class:`google.cloud.bigquery.job.LoadJob`, - :class:`google.cloud.bigquery.job.CopyJob`, - :class:`google.cloud.bigquery.job.ExtractJob`, - or :class:`google.cloud.bigquery.job.QueryJob` - :returns: the job instance, constructed via the resource + Returns: + Union[ \ + google.cloud.bigquery.job.LoadJob, \ + google.cloud.bigquery.job.CopyJob, \ + google.cloud.bigquery.job.ExtractJob, \ + google.cloud.bigquery.job.QueryJob \ + ]: + The job instance, constructed via the resource. """ config = resource.get("configuration", {}) if "load" in config: @@ -1146,10 +1145,12 @@ def get_job(self, job_id, project=None, location=None, retry=DEFAULT_RETRY): (Optional) How to retry the RPC. Returns: - Union[google.cloud.bigquery.job.LoadJob, \ - google.cloud.bigquery.job.CopyJob, \ - google.cloud.bigquery.job.ExtractJob, \ - google.cloud.bigquery.job.QueryJob]: + Union[ \ + google.cloud.bigquery.job.LoadJob, \ + google.cloud.bigquery.job.CopyJob, \ + google.cloud.bigquery.job.ExtractJob, \ + google.cloud.bigquery.job.QueryJob \ + ]: Job instance, based on the resource returned by the API. """ extra_params = {"projection": "full"} @@ -1177,7 +1178,7 @@ def cancel_job(self, job_id, project=None, location=None, retry=DEFAULT_RETRY): See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/cancel - Arguments: + Args: job_id (str): Unique job identifier. Keyword Arguments: @@ -1189,10 +1190,12 @@ def cancel_job(self, job_id, project=None, location=None, retry=DEFAULT_RETRY): (Optional) How to retry the RPC. Returns: - Union[google.cloud.bigquery.job.LoadJob, \ - google.cloud.bigquery.job.CopyJob, \ - google.cloud.bigquery.job.ExtractJob, \ - google.cloud.bigquery.job.QueryJob]: + Union[ \ + google.cloud.bigquery.job.LoadJob, \ + google.cloud.bigquery.job.CopyJob, \ + google.cloud.bigquery.job.ExtractJob, \ + google.cloud.bigquery.job.QueryJob, \ + ]: Job instance, based on the resource returned by the API. """ extra_params = {"projection": "full"} @@ -1232,38 +1235,38 @@ def list_jobs( https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/list Args: - project (str, optional): + project (Optional[str]): Project ID to use for retreiving datasets. Defaults to the client's project. parent_job (Optional[Union[ \ - :class:`~google.cloud.bigquery.job._AsyncJob`, \ + google.cloud.bigquery.job._AsyncJob, \ str, \ ]]): If set, retrieve only child jobs of the specified parent. - max_results (int, optional): + max_results (Optional[int]): Maximum number of jobs to return. - page_token (str, optional): + page_token (Optional[str]): Opaque marker for the next "page" of jobs. If not passed, the API will return the first page of jobs. The token marks the beginning of the iterator to be returned and the value of the ``page_token`` can be accessed at ``next_page_token`` of :class:`~google.api_core.page_iterator.HTTPIterator`. - all_users (bool, optional): + all_users (Optional[bool]): If true, include jobs owned by all users in the project. Defaults to :data:`False`. 
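A sketch of fetching and cancelling a job through the methods documented above; the job ID is a placeholder, and ``location`` must match where the job ran:

    from google.cloud import bigquery

    client = bigquery.Client()

    job = client.get_job("bquxjob_1234abcd", location="US")
    print(job.job_id, job.state)

    if job.state != "DONE":
        # Sends a cancel request; the job may still take time to stop.
        client.cancel_job(job.job_id, location=job.location)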
- state_filter (str, optional): + state_filter (Optional[str]): If set, include only jobs matching the given state. One of: * ``"done"`` * ``"pending"`` * ``"running"`` - retry (google.api_core.retry.Retry, optional): + retry (Optional[google.api_core.retry.Retry]): How to retry the RPC. - min_creation_time (datetime.datetime, optional): + min_creation_time (Optional[datetime.datetime]): Min value for job creation time. If set, only jobs created after or at this timestamp are returned. If the datetime has no time zone assumes UTC time. - max_creation_time (datetime.datetime, optional): + max_creation_time (Optional[datetime.datetime]): Max value for job creation time. If set, only jobs created before or at this timestamp are returned. If the datetime has no time zone assumes UTC time. @@ -1328,8 +1331,8 @@ def load_table_from_uri( URIs of data files to be loaded; in format ``gs:///``. destination (Union[ \ - :class:`~google.cloud.bigquery.table.Table`, \ - :class:`~google.cloud.bigquery.table.TableReference`, \ + google.cloud.bigquery.table.Table, \ + google.cloud.bigquery.table.TableReference, \ str, \ ]): Table into which data is to be loaded. If a string is passed @@ -1406,8 +1409,8 @@ def load_table_from_file( Arguments: file_obj (file): A file handle opened in binary mode for reading. destination (Union[ \ - :class:`~google.cloud.bigquery.table.Table`, \ - :class:`~google.cloud.bigquery.table.TableReference`, \ + google.cloud.bigquery.table.Table, \ + google.cloud.bigquery.table.TableReference, \ str, \ ]): Table into which data is to be loaded. If a string is passed @@ -1517,19 +1520,19 @@ def load_table_from_dataframe( :func:`google.cloud.bigquery.table.TableReference.from_string`. Keyword Arguments: - num_retries (int, optional): Number of upload retries. - job_id (str, optional): Name of the job. - job_id_prefix (str, optional): + num_retries (Optional[int]): Number of upload retries. + job_id (Optional[str]): Name of the job. + job_id_prefix (Optional[str]): The user-provided prefix for a randomly generated job ID. This parameter will be ignored if a ``job_id`` is also given. location (str): Location where to run the job. Must match the location of the destination table. - project (str, optional): + project (Optional[str]): Project ID of the project of where to run the job. Defaults to the client's project. - job_config (~google.cloud.bigquery.job.LoadJobConfig, optional): + job_config (Optional[google.cloud.bigquery.job.LoadJobConfig]): Extra configuration options for the job. To override the default pandas data type conversions, supply @@ -1672,7 +1675,7 @@ def load_table_from_json( ): """Upload the contents of a table from a JSON string or dict. - Arguments: + Args: json_rows (Iterable[Dict[str, Any]]): Row data to be inserted. Keys must match the table schema fields and values must be JSON-compatible representations. @@ -1693,8 +1696,8 @@ def load_table_from_json( client.load_table_from_file(data_as_file, ...) destination (Union[ \ - :class:`~google.cloud.bigquery.table.Table`, \ - :class:`~google.cloud.bigquery.table.TableReference`, \ + google.cloud.bigquery.table.Table, \ + google.cloud.bigquery.table.TableReference, \ str, \ ]): Table into which data is to be loaded. If a string is passed @@ -1703,7 +1706,7 @@ def load_table_from_json( :func:`google.cloud.bigquery.table.TableReference.from_string`. Keyword Arguments: - num_retries (int, optional): Number of upload retries. + num_retries (Optional[int]): Number of upload retries. job_id (str): (Optional) Name of the job. 
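A minimal sketch of ``load_table_from_json`` as documented here; the destination table ID is a placeholder and the schema is left to autodetection:

    from google.cloud import bigquery

    client = bigquery.Client()

    rows = [
        {"name": "Ada", "age": 32},
        {"name": "Grace", "age": 29},
    ]
    load_job = client.load_table_from_json(rows, "my_dataset.my_table")
    load_job.result()  # blocks until the load job finishes
    print(load_job.output_rows)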
job_id_prefix (str): (Optional) the user-provided prefix for a randomly generated @@ -1767,19 +1770,19 @@ def load_table_from_json( def _do_resumable_upload(self, stream, metadata, num_retries): """Perform a resumable upload. - :type stream: IO[bytes] - :param stream: A bytes IO object open for reading. + Args: + stream (IO[bytes]): A bytes IO object open for reading. - :type metadata: dict - :param metadata: The metadata associated with the upload. + metadata (Dict): The metadata associated with the upload. - :type num_retries: int - :param num_retries: Number of upload retries. (Deprecated: This - argument will be removed in a future release.) + num_retries (int): + Number of upload retries. (Deprecated: This + argument will be removed in a future release.) - :rtype: :class:`~requests.Response` - :returns: The "200 OK" response object returned after the final chunk - is uploaded. + Returns: + requests.Response: + The "200 OK" response object returned after the final chunk + is uploaded. """ upload, transport = self._initiate_resumable_upload( stream, metadata, num_retries @@ -1793,23 +1796,22 @@ def _do_resumable_upload(self, stream, metadata, num_retries): def _initiate_resumable_upload(self, stream, metadata, num_retries): """Initiate a resumable upload. - :type stream: IO[bytes] - :param stream: A bytes IO object open for reading. + Args: + stream (IO[bytes]): A bytes IO object open for reading. - :type metadata: dict - :param metadata: The metadata associated with the upload. + metadata (Dict): The metadata associated with the upload. - :type num_retries: int - :param num_retries: Number of upload retries. (Deprecated: This - argument will be removed in a future release.) + num_retries (int): + Number of upload retries. (Deprecated: This + argument will be removed in a future release.) - :rtype: tuple - :returns: - Pair of + Returns: + Tuple: + Pair of - * The :class:`~google.resumable_media.requests.ResumableUpload` - that was created - * The ``transport`` used to initiate the upload. + * The :class:`~google.resumable_media.requests.ResumableUpload` + that was created + * The ``transport`` used to initiate the upload. """ chunk_size = _DEFAULT_CHUNKSIZE transport = self._http @@ -1833,26 +1835,29 @@ def _initiate_resumable_upload(self, stream, metadata, num_retries): def _do_multipart_upload(self, stream, metadata, size, num_retries): """Perform a multipart upload. - :type stream: IO[bytes] - :param stream: A bytes IO object open for reading. + Args: + stream (IO[bytes]): A bytes IO object open for reading. - :type metadata: dict - :param metadata: The metadata associated with the upload. + metadata (Dict): The metadata associated with the upload. - :type size: int - :param size: The number of bytes to be uploaded (which will be read - from ``stream``). If not provided, the upload will be - concluded once ``stream`` is exhausted (or :data:`None`). + size (int): + The number of bytes to be uploaded (which will be read + from ``stream``). If not provided, the upload will be + concluded once ``stream`` is exhausted (or :data:`None`). + + num_retries (int): + Number of upload retries. (Deprecated: This + argument will be removed in a future release.) - :type num_retries: int - :param num_retries: Number of upload retries. (Deprecated: This - argument will be removed in a future release.) + Returns: + requests.Response: + The "200 OK" response object returned after the multipart + upload request. 
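The resumable and multipart helpers above back the public ``load_table_from_file`` method; a sketch using an in-memory stream (a real file opened in ``rb`` mode works the same way, and the table ID is a placeholder):

    import io

    from google.cloud import bigquery

    client = bigquery.Client()

    stream = io.BytesIO(b"name,age\nAda,32\n")
    job_config = bigquery.LoadJobConfig(
        source_format=bigquery.SourceFormat.CSV,
        skip_leading_rows=1,
        autodetect=True,
    )
    job = client.load_table_from_file(
        stream, "my_dataset.my_table", job_config=job_config
    )
    job.result()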
-
-        :rtype: :class:`~requests.Response`
-        :returns: The "200 OK" response object returned after the multipart
-                  upload request.
-        :raises: :exc:`ValueError` if the ``stream`` has fewer than ``size``
-                  bytes remaining.
+        Raises:
+            ValueError:
+                if the ``stream`` has fewer than ``size``
+                bytes remaining.
        """
        data = stream.read(size)
        if len(data) < size:
@@ -1889,23 +1894,23 @@ def copy_table(
        See
        https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationtablecopy

-        Arguments:
+        Args:
            sources (Union[ \
-                :class:`~google.cloud.bigquery.table.Table`, \
-                :class:`~google.cloud.bigquery.table.TableReference`, \
+                google.cloud.bigquery.table.Table, \
+                google.cloud.bigquery.table.TableReference, \
                str, \
                Sequence[ \
                    Union[ \
-                        :class:`~google.cloud.bigquery.table.Table`, \
-                        :class:`~google.cloud.bigquery.table.TableReference`, \
+                        google.cloud.bigquery.table.Table, \
+                        google.cloud.bigquery.table.TableReference, \
                        str, \
                    ] \
                ], \
            ]):
                Table or tables to be copied.
-            destination (Union[
-                :class:`~google.cloud.bigquery.table.Table`, \
-                :class:`~google.cloud.bigquery.table.TableReference`, \
+            destination (Union[ \
+                google.cloud.bigquery.table.Table, \
+                google.cloud.bigquery.table.TableReference, \
                str, \
            ]):
                Table into which data is to be copied.
@@ -1987,10 +1992,10 @@ def extract_table(
        See
        https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationextract

-        Arguments:
+        Args:
            source (Union[ \
-                :class:`google.cloud.bigquery.table.Table`, \
-                :class:`google.cloud.bigquery.table.TableReference`, \
-                src, \
+                google.cloud.bigquery.table.Table, \
+                google.cloud.bigquery.table.TableReference, \
+                str, \
            ]):
                Table to be extracted.
                (Optional) Extra configuration options for the job.
            retry (google.api_core.retry.Retry):
                (Optional) How to retry the RPC.
-        :type source: :class:`google.cloud.bigquery.table.TableReference`
-        :param source: table to be extracted.
-

        Returns:
            google.cloud.bigquery.job.ExtractJob: A new extract job instance.
@@ -2067,7 +2071,7 @@ def query(
        See
        https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#jobconfigurationquery

-        Arguments:
+        Args:
            query (str):
                SQL query to be executed. Defaults to the standard SQL
                dialect. Use the ``job_config`` parameter to change dialects.
@@ -2141,27 +2145,22 @@ def insert_rows(self, table, rows, selected_fields=None, **kwargs):

        Args:
            table (Union[ \
-                :class:`~google.cloud.bigquery.table.Table`, \
-                :class:`~google.cloud.bigquery.table.TableReference`, \
+                google.cloud.bigquery.table.Table, \
+                google.cloud.bigquery.table.TableReference, \
                str, \
            ]):
                The destination table for the row data, or a reference to it.
-            rows (Union[ \
-                Sequence[Tuple], \
-                Sequence[dict], \
-            ]):
+            rows (Union[Sequence[Tuple], Sequence[dict]]):
                Row data to be inserted. If a list of tuples is given, each
                tuple should contain data for each schema field on the
                current table and in the same order as the schema fields. If
                a list of dictionaries is given, the keys must include all
                required fields in the schema. Keys which do not correspond
                to a field in the schema are ignored.
-            selected_fields (Sequence[ \
-                :class:`~google.cloud.bigquery.schema.SchemaField`, \
-            ]):
+            selected_fields (Sequence[google.cloud.bigquery.schema.SchemaField]):
                The fields to return. Required if ``table`` is a
                :class:`~google.cloud.bigquery.table.TableReference`.
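A sketch of the ``query`` method documented above, running a parameterized query against a public dataset:

    from google.cloud import bigquery

    client = bigquery.Client()

    job_config = bigquery.QueryJobConfig(
        query_parameters=[bigquery.ScalarQueryParameter("min_count", "INT64", 1000)]
    )
    query_job = client.query(
        "SELECT word, word_count FROM `bigquery-public-data.samples.shakespeare` "
        "WHERE word_count >= @min_count",
        job_config=job_config,
    )
    for row in query_job.result():  # waits for the job to finish
        print(row.word, row.word_count)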
- kwargs (dict): + kwargs (Dict): Keyword arguments to :meth:`~google.cloud.bigquery.client.Client.insert_rows_json`. @@ -2204,21 +2203,19 @@ def insert_rows_from_dataframe( Args: table (Union[ \ - :class:`~google.cloud.bigquery.table.Table`, \ - :class:`~google.cloud.bigquery.table.TableReference`, \ + google.cloud.bigquery.table.Table, \ + google.cloud.bigquery.table.TableReference, \ str, \ ]): The destination table for the row data, or a reference to it. dataframe (pandas.DataFrame): A :class:`~pandas.DataFrame` containing the data to load. - selected_fields (Sequence[ \ - :class:`~google.cloud.bigquery.schema.SchemaField`, \ - ]): + selected_fields (Sequence[google.cloud.bigquery.schema.SchemaField]): The fields to return. Required if ``table`` is a :class:`~google.cloud.bigquery.table.TableReference`. chunk_size (int): The number of rows to stream in a single chunk. Must be positive. - kwargs (dict): + kwargs (Dict): Keyword arguments to :meth:`~google.cloud.bigquery.client.Client.insert_rows_json`. @@ -2263,33 +2260,33 @@ def insert_rows_json( See https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll - table (Union[ \ - :class:`~google.cloud.bigquery.table.Table` \ - :class:`~google.cloud.bigquery.table.TableReference`, \ - str, \ - ]): - The destination table for the row data, or a reference to it. - json_rows (Sequence[dict]): - Row data to be inserted. Keys must match the table schema fields - and values must be JSON-compatible representations. - row_ids (Sequence[str]): - (Optional) Unique ids, one per row being inserted. If omitted, - unique IDs are created. - skip_invalid_rows (bool): - (Optional) Insert all valid rows of a request, even if invalid - rows exist. The default value is False, which causes the entire - request to fail if any invalid rows exist. - ignore_unknown_values (bool): - (Optional) Accept rows that contain values that do not match the - schema. The unknown values are ignored. Default is False, which - treats unknown values as errors. - template_suffix (str): - (Optional) treat ``name`` as a template table and provide a suffix. - BigQuery will create the table `` + `` based - on the schema of the template table. See - https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables - retry (:class:`google.api_core.retry.Retry`): - (Optional) How to retry the RPC. + Args: + table (Union[ \ + google.cloud.bigquery.table.Table \ + google.cloud.bigquery.table.TableReference, \ + str, \ + ]): + The destination table for the row data, or a reference to it. + json_rows (Sequence[Dict]): + Row data to be inserted. Keys must match the table schema fields + and values must be JSON-compatible representations. + row_ids (Sequence[str]): + (Optional) Unique ids, one per row being inserted. If omitted, + unique IDs are created. + skip_invalid_rows (bool): + (Optional) Insert all valid rows of a request, even if invalid + rows exist. The default value is False, which causes the entire + request to fail if any invalid rows exist. + ignore_unknown_values (bool): + (Optional) Accept rows that contain values that do not match the + schema. The unknown values are ignored. Default is False, which + treats unknown values as errors. + template_suffix (str): + (Optional) treat ``name`` as a template table and provide a suffix. + BigQuery will create the table `` + `` based + on the schema of the template table. 
See + https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables + retry (google.api_core.retry.Retry): (Optional) How to retry the RPC. Returns: Sequence[Mappings]: @@ -2335,10 +2332,10 @@ def insert_rows_json( def list_partitions(self, table, retry=DEFAULT_RETRY): """List the partitions in a table. - Arguments: + Args: table (Union[ \ - :class:`~google.cloud.bigquery.table.Table`, \ - :class:`~google.cloud.bigquery.table.TableReference`, \ + google.cloud.bigquery.table.Table, \ + google.cloud.bigquery.table.TableReference, \ str, \ ]): The table or reference from which to get partition info @@ -2387,18 +2384,16 @@ def list_rows( Args: table (Union[ \ - :class:`~google.cloud.bigquery.table.Table`, \ - :class:`~google.cloud.bigquery.table.TableListItem`, \ - :class:`~google.cloud.bigquery.table.TableReference`, \ + google.cloud.bigquery.table.Table, \ + google.cloud.bigquery.table.TableListItem, \ + google.cloud.bigquery.table.TableReference, \ str, \ ]): The table to list, or a reference to it. When the table object does not contain a schema and ``selected_fields`` is not supplied, this method calls ``get_table`` to fetch the table schema. - selected_fields (Sequence[ \ - :class:`~google.cloud.bigquery.schema.SchemaField` \ - ]): + selected_fields (Sequence[google.cloud.bigquery.schema.SchemaField]): The fields to return. If not supplied, data for all columns are downloaded. max_results (int): @@ -2416,7 +2411,7 @@ def list_rows( Optional. The maximum number of rows in each page of results from this request. Non-positive values are ignored. Defaults to a sensible value set by the API. - retry (:class:`google.api_core.retry.Retry`): + retry (google.api_core.retry.Retry): (Optional) How to retry the RPC. Returns: @@ -2516,14 +2511,13 @@ def schema_to_json(self, schema_list, destination): def _item_to_project(iterator, resource): """Convert a JSON project to the native object. - :type iterator: :class:`~google.api_core.page_iterator.Iterator` - :param iterator: The iterator that is currently in use. + Args: + iterator (google.api_core.page_iterator.Iterator): The iterator that is currently in use. - :type resource: dict - :param resource: An item to be converted to a project. + resource (Dict): An item to be converted to a project. - :rtype: :class:`.Project` - :returns: The next project in the page. + Returns: + google.cloud.bigquery.client.Project: The next project in the page. """ return Project.from_api_repr(resource) @@ -2534,14 +2528,13 @@ def _item_to_project(iterator, resource): def _item_to_dataset(iterator, resource): """Convert a JSON dataset to the native object. - :type iterator: :class:`~google.api_core.page_iterator.Iterator` - :param iterator: The iterator that is currently in use. + Args: + iterator (google.api_core.page_iterator.Iterator): The iterator that is currently in use. - :type resource: dict - :param resource: An item to be converted to a dataset. + resource (Dict): An item to be converted to a dataset. - :rtype: :class:`.DatasetListItem` - :returns: The next dataset in the page. + Returns: + google.cloud.bigquery.dataset.DatasetListItem: The next dataset in the page. """ return DatasetListItem(resource) @@ -2549,14 +2542,13 @@ def _item_to_dataset(iterator, resource): def _item_to_job(iterator, resource): """Convert a JSON job to the native object. - :type iterator: :class:`~google.api_core.page_iterator.Iterator` - :param iterator: The iterator that is currently in use. 
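A sketch of ``insert_rows_json`` as documented above; the table ID is a placeholder, and the return value is an empty list when every row is inserted:

    from google.cloud import bigquery

    client = bigquery.Client()

    errors = client.insert_rows_json(
        "my_dataset.my_table",
        [{"full_name": "Ada Lovelace", "age": 36}],
        ignore_unknown_values=True,
    )
    if errors:
        print("Insert errors:", errors)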
+ Args: + iterator (google.api_core.page_iterator.Iterator): The iterator that is currently in use. - :type resource: dict - :param resource: An item to be converted to a job. + resource (Dict): An item to be converted to a job. - :rtype: job instance. - :returns: The next job in the page. + Returns: + job instance: The next job in the page. """ return iterator.client.job_from_resource(resource) @@ -2567,8 +2559,7 @@ def _item_to_model(iterator, resource): Args: iterator (google.api_core.page_iterator.Iterator): The iterator that is currently in use. - resource (dict): - An item to be converted to a model. + resource (Dict): An item to be converted to a model. Returns: google.cloud.bigquery.model.Model: The next model in the page. @@ -2582,8 +2573,7 @@ def _item_to_routine(iterator, resource): Args: iterator (google.api_core.page_iterator.Iterator): The iterator that is currently in use. - resource (dict): - An item to be converted to a routine. + resource (Dict): An item to be converted to a routine. Returns: google.cloud.bigquery.routine.Routine: The next routine in the page. @@ -2594,14 +2584,13 @@ def _item_to_routine(iterator, resource): def _item_to_table(iterator, resource): """Convert a JSON table to the native object. - :type iterator: :class:`~google.api_core.page_iterator.Iterator` - :param iterator: The iterator that is currently in use. + Args: + iterator (google.api_core.page_iterator.Iterator): The iterator that is currently in use. - :type resource: dict - :param resource: An item to be converted to a table. + resource (Dict): An item to be converted to a table. - :rtype: :class:`~google.cloud.bigquery.table.Table` - :returns: The next table in the page. + Returns: + google.cloud.bigquery.table.Table: The next table in the page. """ return TableListItem(resource) @@ -2609,14 +2598,13 @@ def _item_to_table(iterator, resource): def _make_job_id(job_id, prefix=None): """Construct an ID for a new job. - :type job_id: str or ``NoneType`` - :param job_id: the user-provided job ID + Args: + job_id (Optional[str]): the user-provided job ID. - :type prefix: str or ``NoneType`` - :param prefix: (Optional) the user-provided prefix for a job ID + prefix (Optional[str]): the user-provided prefix for a job ID. - :rtype: str - :returns: A job ID + Returns: + str: A job ID """ if job_id is not None: return job_id @@ -2629,11 +2617,13 @@ def _make_job_id(job_id, prefix=None): def _check_mode(stream): """Check that a stream was opened in read-binary mode. - :type stream: IO[bytes] - :param stream: A bytes IO object open for reading. + Args: + stream (IO[bytes]): A bytes IO object open for reading. - :raises: :exc:`ValueError` if the ``stream.mode`` is a valid attribute - and is not among ``rb``, ``r+b`` or ``rb+``. + Raises: + ValueError: + if the ``stream.mode`` is a valid attribute + and is not among ``rb``, ``r+b`` or ``rb+``. """ mode = getattr(stream, "mode", None) @@ -2654,11 +2644,11 @@ def _check_mode(stream): def _get_upload_headers(user_agent): """Get the headers for an upload request. - :type user_agent: str - :param user_agent: The user-agent for requests. + Args: + user_agent (str): The user-agent for requests. - :rtype: dict - :returns: The headers to be used for the request. + Returns: + Dict: The headers to be used for the request. 
""" return { "Accept": "application/json", diff --git a/bigquery/google/cloud/bigquery/dbapi/_helpers.py b/bigquery/google/cloud/bigquery/dbapi/_helpers.py index e5f4cff51666..651880feac90 100644 --- a/bigquery/google/cloud/bigquery/dbapi/_helpers.py +++ b/bigquery/google/cloud/bigquery/dbapi/_helpers.py @@ -30,18 +30,21 @@ def scalar_to_query_parameter(value, name=None): """Convert a scalar value into a query parameter. - :type value: any - :param value: A scalar value to convert into a query parameter. + Args: + value (Any): + A scalar value to convert into a query parameter. - :type name: str - :param name: (Optional) Name of the query parameter. + name (str): + (Optional) Name of the query parameter. - :rtype: :class:`~google.cloud.bigquery.ScalarQueryParameter` - :returns: - A query parameter corresponding with the type and value of the plain - Python object. - :raises: :class:`~google.cloud.bigquery.dbapi.exceptions.ProgrammingError` - if the type cannot be determined. + Returns: + google.cloud.bigquery.ScalarQueryParameter: + A query parameter corresponding with the type and value of the plain + Python object. + + Raises: + google.cloud.bigquery.dbapi.exceptions.ProgrammingError: + if the type cannot be determined. """ parameter_type = bigquery_scalar_type(value) @@ -67,8 +70,8 @@ def array_to_query_parameter(value, name=None): Python object. Raises: - :class:`~google.cloud.bigquery.dbapi.exceptions.ProgrammingError` - if the type of array elements cannot be determined. + google.cloud.bigquery.dbapi.exceptions.ProgrammingError: + if the type of array elements cannot be determined. """ if not array_like(value): raise exceptions.ProgrammingError( @@ -97,11 +100,12 @@ def array_to_query_parameter(value, name=None): def to_query_parameters_list(parameters): """Converts a sequence of parameter values into query parameters. - :type parameters: Sequence[Any] - :param parameters: Sequence of query parameter values. + Args: + parameters (Sequence[Any]): Sequence of query parameter values. - :rtype: List[google.cloud.bigquery.query._AbstractQueryParameter] - :returns: A list of query parameters. + Returns: + List[google.cloud.bigquery.query._AbstractQueryParameter]: + A list of query parameters. """ result = [] @@ -120,11 +124,12 @@ def to_query_parameters_list(parameters): def to_query_parameters_dict(parameters): """Converts a dictionary of parameter values into query parameters. - :type parameters: Mapping[str, Any] - :param parameters: Dictionary of query parameter values. + Args: + parameters (Mapping[str, Any]): Dictionary of query parameter values. - :rtype: List[google.cloud.bigquery.query._AbstractQueryParameter] - :returns: A list of named query parameters. + Returns: + List[google.cloud.bigquery.query._AbstractQueryParameter]: + A list of named query parameters. """ result = [] @@ -146,11 +151,13 @@ def to_query_parameters_dict(parameters): def to_query_parameters(parameters): """Converts DB-API parameter values into query parameters. - :type parameters: Mapping[str, Any] or Sequence[Any] - :param parameters: A dictionary or sequence of query parameter values. + Args: + parameters (Union[Mapping[str, Any], Sequence[Any]]): + A dictionary or sequence of query parameter values. - :rtype: List[google.cloud.bigquery.query._AbstractQueryParameter] - :returns: A list of query parameters. + Returns: + List[google.cloud.bigquery.query._AbstractQueryParameter]: + A list of query parameters. 
""" if parameters is None: return [] diff --git a/bigquery/google/cloud/bigquery/dbapi/cursor.py b/bigquery/google/cloud/bigquery/dbapi/cursor.py index 9b7a895b367f..a3e6ea5be87e 100644 --- a/bigquery/google/cloud/bigquery/dbapi/cursor.py +++ b/bigquery/google/cloud/bigquery/dbapi/cursor.py @@ -49,8 +49,9 @@ class Cursor(object): """DB-API Cursor to Google BigQuery. - :type connection: :class:`~google.cloud.bigquery.dbapi.Connection` - :param connection: A DB-API connection to Google BigQuery. + Args: + connection (google.cloud.bigquery.dbapi.Connection): + A DB-API connection to Google BigQuery. """ def __init__(self, connection): @@ -74,8 +75,9 @@ def close(self): def _set_description(self, schema): """Set description from schema. - :type schema: Sequence[google.cloud.bigquery.schema.SchemaField] - :param schema: A description of fields in the schema. + Args: + schema (Sequence[google.cloud.bigquery.schema.SchemaField]): + A description of fields in the schema. """ if schema is None: self.description = None @@ -103,9 +105,9 @@ def _set_rowcount(self, query_results): query, but if it was a DML statement, it sets rowcount to the number of modified rows. - :type query_results: - :class:`~google.cloud.bigquery.query._QueryResults` - :param query_results: results of a query + Args: + query_results (google.cloud.bigquery.query._QueryResults): + Results of a query. """ total_rows = 0 num_dml_affected_rows = query_results.num_dml_affected_rows @@ -138,19 +140,18 @@ def execute(self, operation, parameters=None, job_id=None, job_config=None): yet supported. See: https://github.com/GoogleCloudPlatform/google-cloud-python/issues/3524 - :type operation: str - :param operation: A Google BigQuery query string. + Args: + operation (str): A Google BigQuery query string. - :type parameters: Mapping[str, Any] or Sequence[Any] - :param parameters: - (Optional) dictionary or sequence of parameter values. + parameters (Union[Mapping[str, Any], Sequence[Any]]): + (Optional) dictionary or sequence of parameter values. - :type job_id: str - :param job_id: (Optional) The job_id to use. If not set, a job ID - is generated at random. + job_id (str): + (Optional) The job_id to use. If not set, a job ID + is generated at random. - :type job_config: :class:`~google.cloud.bigquery.job.QueryJobConfig` - :param job_config: (Optional) Extra configuration options for the query job. + job_config (google.cloud.bigquery.job.QueryJobConfig): + (Optional) Extra configuration options for the query job. """ self._query_data = None self._query_job = None @@ -182,11 +183,11 @@ def execute(self, operation, parameters=None, job_id=None, job_config=None): def executemany(self, operation, seq_of_parameters): """Prepare and execute a database operation multiple times. - :type operation: str - :param operation: A Google BigQuery query string. + Args: + operation (str): A Google BigQuery query string. - :type seq_of_parameters: Sequence[Mapping[str, Any] or Sequence[Any]] - :param parameters: Sequence of many sets of parameter values. + seq_of_parameters (Union[Sequence[Mapping[str, Any], Sequence[Any]]]): + Sequence of many sets of parameter values. """ for parameters in seq_of_parameters: self.execute(operation, parameters) @@ -221,12 +222,13 @@ def _try_fetch(self, size=None): def fetchone(self): """Fetch a single row from the results of the last ``execute*()`` call. - :rtype: tuple - :returns: - A tuple representing a row or ``None`` if no more data is - available. 
- :raises: :class:`~google.cloud.bigquery.dbapi.InterfaceError` - if called before ``execute()``. + Returns: + Tuple: + A tuple representing a row or ``None`` if no more data is + available. + + Raises: + google.cloud.bigquery.dbapi.InterfaceError: if called before ``execute()``. """ self._try_fetch() try: @@ -242,16 +244,17 @@ def fetchmany(self, size=None): Set the ``arraysize`` attribute before calling ``execute()`` to set the batch size. - :type size: int - :param size: - (Optional) Maximum number of rows to return. Defaults to the - ``arraysize`` property value. If ``arraysize`` is not set, it - defaults to ``1``. + Args: + size (int): + (Optional) Maximum number of rows to return. Defaults to the + ``arraysize`` property value. If ``arraysize`` is not set, it + defaults to ``1``. + + Returns: + List[Tuple]: A list of rows. - :rtype: List[tuple] - :returns: A list of rows. - :raises: :class:`~google.cloud.bigquery.dbapi.InterfaceError` - if called before ``execute()``. + Raises: + google.cloud.bigquery.dbapi.InterfaceError: if called before ``execute()``. """ if size is None: # Since self.arraysize can be None (a deviation from PEP 249), @@ -272,10 +275,11 @@ def fetchmany(self, size=None): def fetchall(self): """Fetch all remaining results from the last ``execute*()`` call. - :rtype: List[tuple] - :returns: A list of all the rows in the results. - :raises: :class:`~google.cloud.bigquery.dbapi.InterfaceError` - if called before ``execute()``. + Returns: + List[Tuple]: A list of all the rows in the results. + + Raises: + google.cloud.bigquery.dbapi.InterfaceError: if called before ``execute()``. """ self._try_fetch() return list(self._query_data) @@ -293,17 +297,18 @@ def _format_operation_list(operation, parameters): The input operation will be a query like ``SELECT %s`` and the output will be a query like ``SELECT ?``. - :type operation: str - :param operation: A Google BigQuery query string. + Args: + operation (str): A Google BigQuery query string. - :type parameters: Sequence[Any] - :param parameters: Sequence of parameter values. + parameters (Sequence[Any]): Sequence of parameter values. - :rtype: str - :returns: A formatted query string. - :raises: :class:`~google.cloud.bigquery.dbapi.ProgrammingError` - if a parameter used in the operation is not found in the - ``parameters`` argument. + Returns: + str: A formatted query string. + + Raises: + google.cloud.bigquery.dbapi.ProgrammingError: + if a parameter used in the operation is not found in the + ``parameters`` argument. """ formatted_params = ["?" for _ in parameters] @@ -319,17 +324,18 @@ def _format_operation_dict(operation, parameters): The input operation will be a query like ``SELECT %(namedparam)s`` and the output will be a query like ``SELECT @namedparam``. - :type operation: str - :param operation: A Google BigQuery query string. + Args: + operation (str): A Google BigQuery query string. + + parameters (Mapping[str, Any]): Dictionary of parameter values. - :type parameters: Mapping[str, Any] - :param parameters: Dictionary of parameter values. + Returns: + str: A formatted query string. - :rtype: str - :returns: A formatted query string. - :raises: :class:`~google.cloud.bigquery.dbapi.ProgrammingError` - if a parameter used in the operation is not found in the - ``parameters`` argument. + Raises: + google.cloud.bigquery.dbapi.ProgrammingError: + if a parameter used in the operation is not found in the + ``parameters`` argument. 
""" formatted_params = {} for name in parameters: @@ -345,17 +351,19 @@ def _format_operation_dict(operation, parameters): def _format_operation(operation, parameters=None): """Formats parameters in operation in way BigQuery expects. - :type: str - :param operation: A Google BigQuery query string. + Args: + operation (str): A Google BigQuery query string. + + parameters (Union[Mapping[str, Any], Sequence[Any]]): + Optional parameter values. - :type: Mapping[str, Any] or Sequence[Any] - :param parameters: Optional parameter values. + Returns: + str: A formatted query string. - :rtype: str - :returns: A formatted query string. - :raises: :class:`~google.cloud.bigquery.dbapi.ProgrammingError` - if a parameter used in the operation is not found in the - ``parameters`` argument. + Raises: + google.cloud.bigquery.dbapi.ProgrammingError: + if a parameter used in the operation is not found in the + ``parameters`` argument. """ if parameters is None: return operation diff --git a/bigquery/google/cloud/bigquery/job.py b/bigquery/google/cloud/bigquery/job.py index cfc5a3797c70..96724c9f805b 100644 --- a/bigquery/google/cloud/bigquery/job.py +++ b/bigquery/google/cloud/bigquery/job.py @@ -81,11 +81,11 @@ def _error_result_to_exception(error_result): .. _troubleshooting errors: https://cloud.google.com/bigquery\ /troubleshooting-errors - :type error_result: Mapping[str, str] - :param error_result: The error result from BigQuery. + Args: + error_result (Mapping[str, str]): The error result from BigQuery. - :rtype google.cloud.exceptions.GoogleCloudError: - :returns: The mapped exception. + Returns: + google.cloud.exceptions.GoogleCloudError: The mapped exception. """ reason = error_result.get("reason") status_code = _ERROR_REASON_TO_EXCEPTION.get( @@ -341,7 +341,7 @@ def parent_job_id(self): https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics.FIELDS.parent_job_id Returns: - Optional[str] + Optional[str]: parent job id. """ return _helpers._get_sub_prop(self._properties, ["statistics", "parentJobId"]) @@ -371,8 +371,8 @@ def num_child_jobs(self): def project(self): """Project bound to the job. - :rtype: str - :returns: the project (derived from the client). + Returns: + str: the project (derived from the client). """ return _helpers._get_sub_prop(self._properties, ["jobReference", "projectId"]) @@ -384,13 +384,14 @@ def location(self): def _require_client(self, client): """Check client or verify over-ride. - :type client: :class:`~google.cloud.bigquery.client.Client` or - ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current dataset. + Args: + client (Optional[google.cloud.bigquery.client.Client]): + the client to use. If not passed, falls back to the + ``client`` stored on the current dataset. - :rtype: :class:`google.cloud.bigquery.client.Client` - :returns: The client passed in or the currently bound client. + Returns: + google.cloud.bigquery.client.Client: + The client passed in or the currently bound client. """ if client is None: client = self._client @@ -398,10 +399,10 @@ def _require_client(self, client): @property def job_type(self): - """Type of job + """Type of job. - :rtype: str - :returns: one of 'load', 'copy', 'extract', 'query' + Returns: + str: one of 'load', 'copy', 'extract', 'query'. """ return self._JOB_TYPE @@ -409,8 +410,8 @@ def job_type(self): def path(self): """URL path for the job's APIs. - :rtype: str - :returns: the path based on project and job ID. 
+ Returns:
+ str: the path based on project and job ID.
 """
 return "/projects/%s/jobs/%s" % (self.project, self.job_id)

@@ -423,8 +424,8 @@ def labels(self):
 def etag(self):
 """ETag for the job resource.

- :rtype: str, or ``NoneType``
- :returns: the ETag (None until set from the server).
+ Returns:
+ Optional[str]: the ETag (None until set from the server).
 """
 return self._properties.get("etag")

@@ -432,8 +433,8 @@ def etag(self):
 def self_link(self):
 """URL for the job resource.

- :rtype: str, or ``NoneType``
- :returns: the URL (None until set from the server).
+ Returns:
+ Optional[str]: the URL (None until set from the server).
 """
 return self._properties.get("selfLink")

@@ -441,8 +442,8 @@ def self_link(self):
 def user_email(self):
 """E-mail address of user who submitted the job.

- :rtype: str, or ``NoneType``
- :returns: the URL (None until set from the server).
+ Returns:
+ Optional[str]: the e-mail address (None until set from the server).
 """
 return self._properties.get("user_email")

@@ -450,8 +451,9 @@ def user_email(self):
 def created(self):
 """Datetime at which the job was created.

- :rtype: ``datetime.datetime``, or ``NoneType``
- :returns: the creation time (None until set from the server).
+ Returns:
+ Optional[datetime.datetime]:
+ the creation time (None until set from the server).
 """
 statistics = self._properties.get("statistics")
 if statistics is not None:
@@ -463,8 +465,9 @@ def created(self):
 def started(self):
 """Datetime at which the job was started.

- :rtype: ``datetime.datetime``, or ``NoneType``
- :returns: the start time (None until set from the server).
+ Returns:
+ Optional[datetime.datetime]:
+ the start time (None until set from the server).
 """
 statistics = self._properties.get("statistics")
 if statistics is not None:
@@ -476,8 +479,9 @@ def started(self):
 def ended(self):
 """Datetime at which the job finished.

- :rtype: ``datetime.datetime``, or ``NoneType``
- :returns: the end time (None until set from the server).
+ Returns:
+ Optional[datetime.datetime]:
+ the end time (None until set from the server).
 """
 statistics = self._properties.get("statistics")
 if statistics is not None:
@@ -494,8 +498,8 @@ def _job_statistics(self):
 def error_result(self):
 """Error information about the job as a whole.

- :rtype: mapping, or ``NoneType``
- :returns: the error information (None until set from the server).
+ Returns:
+ Optional[Mapping]: the error information (None until set from the server).
 """
 status = self._properties.get("status")
 if status is not None:
@@ -505,8 +509,9 @@ def error_result(self):
 def errors(self):
 """Information about individual errors generated by the job.

- :rtype: list of mappings, or ``NoneType``
- :returns: the error information (None until set from the server).
+ Returns:
+ Optional[List[Mapping]]:
+ the error information (None until set from the server).
 """
 status = self._properties.get("status")
 if status is not None:
@@ -516,8 +521,9 @@ def errors(self):
 def state(self):
 """Status of the job.

- :rtype: str, or ``NoneType``
- :returns: the state (None until set from the server).
+ Returns:
+ Optional[str]:
+ the state (None until set from the server).
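+
+ Example:
+ Illustrative only (assumes a job whose status has been
+ fetched from the server):
+
+ >>> job.state # doctest: +SKIP
+ 'DONE'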
""" status = self._properties.get("status") if status is not None: @@ -534,8 +540,8 @@ def _copy_configuration_properties(self, configuration): def _set_properties(self, api_response): """Update properties from resource in body of ``api_response`` - :type api_response: dict - :param api_response: response returned from an API call + Args: + api_response (Dict): response returned from an API call. """ cleaned = api_response.copy() self._scrub_local_properties(cleaned) @@ -559,14 +565,18 @@ def _set_properties(self, api_response): def _get_resource_config(cls, resource): """Helper for :meth:`from_api_repr` - :type resource: dict - :param resource: resource for the job + Args: + resource (Dict): resource for the job. + + Returns: + (str, Dict): + tuple (string, dict), where the first element is the + job ID and the second contains job-specific configuration. - :rtype: dict - :returns: tuple (string, dict), where the first element is the - job ID and the second contains job-specific configuration. - :raises: :class:`KeyError` if the resource has no identifier, or - is missing the appropriate configuration. + Raises: + KeyError: + If the resource has no identifier, or + is missing the appropriate configuration. """ if "jobReference" not in resource or "jobId" not in resource["jobReference"]: raise KeyError( @@ -626,16 +636,15 @@ def exists(self, client=None, retry=DEFAULT_RETRY): See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/get - :type client: :class:`~google.cloud.bigquery.client.Client` or - ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current dataset. + Args: + client (Optional[google.cloud.bigquery.client.Client]): + the client to use. If not passed, falls back to the + ``client`` stored on the current dataset. - :type retry: :class:`google.api_core.retry.Retry` - :param retry: (Optional) How to retry the RPC. + retry (google.api_core.retry.Retry): (Optional) How to retry the RPC. - :rtype: bool - :returns: Boolean indicating existence of the job. + Returns: + bool: Boolean indicating existence of the job. """ client = self._require_client(client) @@ -658,13 +667,12 @@ def reload(self, client=None, retry=DEFAULT_RETRY): See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/get - :type client: :class:`~google.cloud.bigquery.client.Client` or - ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current dataset. + Args: + client (Optional[google.cloud.bigquery.client.Client]): + the client to use. If not passed, falls back to the + ``client`` stored on the current dataset. - :type retry: :class:`google.api_core.retry.Retry` - :param retry: (Optional) How to retry the RPC. + retry (google.api_core.retry.Retry): (Optional) How to retry the RPC. """ client = self._require_client(client) @@ -683,13 +691,13 @@ def cancel(self, client=None): See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/cancel - :type client: :class:`~google.cloud.bigquery.client.Client` or - ``NoneType`` - :param client: the client to use. If not passed, falls back to the - ``client`` stored on the current dataset. + Args: + client (Optional[google.cloud.bigquery.client.Client]): + the client to use. If not passed, falls back to the + ``client`` stored on the current dataset. - :rtype: bool - :returns: Boolean indicating that the cancel request was sent. + Returns: + bool: Boolean indicating that the cancel request was sent. 
""" client = self._require_client(client) @@ -732,11 +740,11 @@ def _set_future_result(self): def done(self, retry=DEFAULT_RETRY): """Refresh the job and checks if it is complete. - :type retry: :class:`google.api_core.retry.Retry` - :param retry: (Optional) How to retry the RPC. + Args: + retry (google.api_core.retry.Retry): (Optional) How to retry the RPC. - :rtype: bool - :returns: True if the job is complete, False otherwise. + Returns: + bool: True if the job is complete, False otherwise. """ # Do not refresh is the state is already done, as the job will not # change once complete. @@ -747,21 +755,21 @@ def done(self, retry=DEFAULT_RETRY): def result(self, timeout=None, retry=DEFAULT_RETRY): """Start the job and wait for it to complete and get the result. - :type timeout: float - :param timeout: - How long (in seconds) to wait for job to complete before raising - a :class:`concurrent.futures.TimeoutError`. + Args: + timeout (float): + How long (in seconds) to wait for job to complete before raising + a :class:`concurrent.futures.TimeoutError`. - :type retry: :class:`google.api_core.retry.Retry` - :param retry: (Optional) How to retry the RPC. + retry (google.api_core.retry.Retry): (Optional) How to retry the RPC. - :rtype: _AsyncJob - :returns: This instance. + Returns: + _AsyncJob: This instance. - :raises: - :class:`~google.cloud.exceptions.GoogleCloudError` if the job - failed or :class:`concurrent.futures.TimeoutError` if the job did - not complete in the given timeout. + Raises: + google.cloud.exceptions.GoogleCloudError: + if the job failed. + concurrent.futures.TimeoutError: + if the job did not complete in the given timeout. """ if self.state is None: self._begin(retry=retry) @@ -775,8 +783,8 @@ def cancelled(self): cancelled in the API. This method is here to satisfy the interface for :class:`google.api_core.future.Future`. - :rtype: bool - :returns: False + Returns: + bool: False """ return ( self.error_result is not None @@ -787,7 +795,7 @@ def cancelled(self): class _JobConfig(object): """Abstract base class for job configuration objects. - Arguments: + Args: job_type (str): The key to use for the job configuration. """ @@ -830,10 +838,10 @@ def _get_sub_prop(self, key, default=None): _helpers._get_sub_prop( self._properties, ['query', 'destinationTable']) - Arguments: + Args: key (str): - Key for the value to get in the - ``self._properties[self._job_type]`` dictionary. + Key for the value to get in the + ``self._properties[self._job_type]`` dictionary. default (object): (Optional) Default value to return if the key is not found. Defaults to :data:`None`. @@ -859,10 +867,10 @@ def _set_sub_prop(self, key, value): _helper._set_sub_prop( self._properties, ['query', 'useLegacySql'], False) - Arguments: + Args: key (str): - Key to set in the ``self._properties[self._job_type]`` - dictionary. + Key to set in the ``self._properties[self._job_type]`` + dictionary. value (object): Value to set. """ _helpers._set_sub_prop(self._properties, [self._job_type, key], value) @@ -881,18 +889,18 @@ def _del_sub_prop(self, key): _helper._del_sub_prop( self._properties, ['query', 'useLegacySql']) - Arguments: + Args: key (str): - Key to remove in the ``self._properties[self._job_type]`` - dictionary. + Key to remove in the ``self._properties[self._job_type]`` + dictionary. """ _helpers._del_sub_prop(self._properties, [self._job_type, key]) def to_api_repr(self): """Build an API representation of the job config. - :rtype: dict - :returns: A dictionary in the format used by the BigQuery API. 
+ Returns:
+ Dict: A dictionary in the format used by the BigQuery API.
 """
 return copy.deepcopy(self._properties)

@@ -903,12 +911,12 @@ def _fill_from_default(self, default_job_config):
 config. The merge is done at the top-level as well as for keys one level
 below the job type.

- Arguments:
+ Args:
 default_job_config (google.cloud.bigquery.job._JobConfig):
 The default job config that will be used to fill in self.

 Returns:
- google.cloud.bigquery.job._JobConfig A new (merged) job config.
+ google.cloud.bigquery.job._JobConfig: A new (merged) job config.
 """
 if self._job_type != default_job_config._job_type:
 raise TypeError(
@@ -934,13 +942,13 @@ def _fill_from_default(self, default_job_config):
 def from_api_repr(cls, resource):
 """Factory: construct a job configuration given its API representation

- :type resource: dict
- :param resource:
- An extract job configuration in the same representation as is
- returned from the API.
+ Args:
+ resource (Dict):
+ An extract job configuration in the same representation as is
+ returned from the API.

- :rtype: :class:`google.cloud.bigquery.job._JobConfig`
- :returns: Configuration parsed from ``resource``.
+ Returns:
+ google.cloud.bigquery.job._JobConfig: Configuration parsed from ``resource``.
 """
 config = cls()
 config._properties = copy.deepcopy(resource)
@@ -1335,21 +1343,19 @@ class LoadJob(_AsyncJob):

 Can load from Google Cloud Storage URIs or from a file.

- :type job_id: str
- :param job_id: the job's ID
+ Args:
+ job_id (str): the job's ID

- :type source_uris: sequence of string or ``NoneType``
- :param source_uris:
- URIs of one or more data files to be loaded. See
- https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad.FIELDS.source_uris
- for supported URI formats. Pass None for jobs that load from a file.
+ source_uris (Optional[Sequence[str]]):
+ URIs of one or more data files to be loaded. See
+ https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationLoad.FIELDS.source_uris
+ for supported URI formats. Pass None for jobs that load from a file.

- :type destination: :class:`google.cloud.bigquery.table.TableReference`
- :param destination: reference to table into which data is to be loaded.
+ destination (google.cloud.bigquery.table.TableReference): reference to table into which data is to be loaded.

- :type client: :class:`google.cloud.bigquery.client.Client`
- :param client: A client which holds credentials and project configuration
- for the dataset (which requires a project).
+ client (google.cloud.bigquery.client.Client):
+ A client which holds credentials and project configuration
+ for the dataset (which requires a project).
 """

 _JOB_TYPE = "load"
@@ -1486,7 +1492,7 @@ def destination_encryption_configuration(self):

 @property
 def destination_table_description(self):
- """Union[str, None] name given to destination table.
+ """Optional[str]: description given to destination table.

 See:
 https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#DestinationTableProperties.FIELDS.description
@@ -1495,7 +1501,7 @@ def destination_table_description(self):

 @property
 def destination_table_friendly_name(self):
- """Union[str, None] name given to destination table.
+ """Optional[str]: name given to destination table.

 See:
 https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#DestinationTableProperties.FIELDS.friendly_name
@@ -1541,9 +1547,11 @@ def schema_update_options(self):
 def input_file_bytes(self):
 """Count of bytes loaded from source files.
- :rtype: int, or ``NoneType``
- :returns: the count (None until set from the server).
- :raises: ValueError for invalid value types.
+ Returns:
+ Optional[int]: the count (None until set from the server).
+
+ Raises:
+ ValueError: for invalid value types.
 """
 return _helpers._int_or_none(
 _helpers._get_sub_prop(
@@ -1555,8 +1563,8 @@ def input_file_bytes(self):
 def input_files(self):
 """Count of source files.

- :rtype: int, or ``NoneType``
- :returns: the count (None until set from the server).
+ Returns:
+ Optional[int]: the count (None until set from the server).
 """
 return _helpers._int_or_none(
 _helpers._get_sub_prop(
@@ -1568,8 +1576,8 @@ def input_files(self):
 def output_bytes(self):
 """Count of bytes saved to destination table.

- :rtype: int, or ``NoneType``
- :returns: the count (None until set from the server).
+ Returns:
+ Optional[int]: the count (None until set from the server).
 """
 return _helpers._int_or_none(
 _helpers._get_sub_prop(
@@ -1581,8 +1589,8 @@ def output_bytes(self):
 def output_rows(self):
 """Count of rows saved to destination table.

- :rtype: int, or ``NoneType``
- :returns: the count (None until set from the server).
+ Returns:
+ Optional[int]: the count (None until set from the server).
 """
 return _helpers._int_or_none(
 _helpers._get_sub_prop(
@@ -1619,15 +1627,15 @@ def from_api_repr(cls, resource, client):
 This method assumes that the project found in the resource matches the
 client's project.

- :type resource: dict
- :param resource: dataset job representation returned from the API
+ Args:
+ resource (Dict): dataset job representation returned from the API

- :type client: :class:`google.cloud.bigquery.client.Client`
- :param client: Client which holds credentials and project
- configuration for the dataset.
+ client (google.cloud.bigquery.client.Client):
+ Client which holds credentials and project
+ configuration for the dataset.

- :rtype: :class:`google.cloud.bigquery.job.LoadJob`
- :returns: Job parsed from ``resource``.
+ Returns:
+ google.cloud.bigquery.job.LoadJob: Job parsed from ``resource``.
 """
 config_resource = resource.get("configuration", {})
 config = LoadJobConfig.from_api_repr(config_resource)
@@ -1709,22 +1717,19 @@ def destination_encryption_configuration(self, value):
 class CopyJob(_AsyncJob):
 """Asynchronous job: copy data into a table from other tables.

- :type job_id: str
- :param job_id: the job's ID, within the project belonging to ``client``.
+ Args:
+ job_id (str): the job's ID, within the project belonging to ``client``.

- :type sources: list of :class:`google.cloud.bigquery.table.TableReference`
- :param sources: Table from which data is to be loaded.
+ sources (List[google.cloud.bigquery.table.TableReference]): Tables from which data is to be copied.

- :type destination: :class:`google.cloud.bigquery.table.TableReference`
- :param destination: Table into which data is to be loaded.
+ destination (google.cloud.bigquery.table.TableReference): Table into which data is to be copied.

- :type client: :class:`google.cloud.bigquery.client.Client`
- :param client: A client which holds credentials and project configuration
- for the dataset (which requires a project).
+ client (google.cloud.bigquery.client.Client):
+ A client which holds credentials and project configuration
+ for the dataset (which requires a project).

- :type job_config: :class:`~google.cloud.bigquery.job.CopyJobConfig`
- :param job_config:
- (Optional) Extra configuration options for the copy job.
+ job_config (google.cloud.bigquery.job.CopyJobConfig):
+ (Optional) Extra configuration options for the copy job.
 """

 _JOB_TYPE = "copy"
@@ -1808,15 +1813,15 @@ def from_api_repr(cls, resource, client):
 This method assumes that the project found in the resource matches the
 client's project.

- :type resource: dict
- :param resource: dataset job representation returned from the API
+ Args:
+ resource (Dict): dataset job representation returned from the API

- :type client: :class:`google.cloud.bigquery.client.Client`
- :param client: Client which holds credentials and project
- configuration for the dataset.
+ client (google.cloud.bigquery.client.Client):
+ Client which holds credentials and project
+ configuration for the dataset.

- :rtype: :class:`google.cloud.bigquery.job.CopyJob`
- :returns: Job parsed from ``resource``.
+ Returns:
+ google.cloud.bigquery.job.CopyJob: Job parsed from ``resource``.
 """
 job_id, config_resource = cls._get_resource_config(resource)
 config = CopyJobConfig.from_api_repr(config_resource)
@@ -1906,24 +1911,21 @@ def print_header(self, value):
 class ExtractJob(_AsyncJob):
 """Asynchronous job: extract data from a table into Cloud Storage.

- :type job_id: str
- :param job_id: the job's ID
+ Args:
+ job_id (str): the job's ID.

- :type source: :class:`google.cloud.bigquery.table.TableReference`
- :param source: Table into which data is to be loaded.
+ source (google.cloud.bigquery.table.TableReference):
+ Table from which data is to be extracted.

- :type destination_uris: list of string
- :param destination_uris:
- URIs describing where the extracted data will be written in Cloud
- Storage, using the format ``gs://<bucket_name>/<object_name_or_glob>``.
+ destination_uris (List[str]):
+ URIs describing where the extracted data will be written in Cloud
+ Storage, using the format ``gs://<bucket_name>/<object_name_or_glob>``.

- :type client: :class:`google.cloud.bigquery.client.Client`
- :param client:
- A client which holds credentials and project configuration.
+ client (google.cloud.bigquery.client.Client):
+ A client which holds credentials and project configuration.

- :type job_config: :class:`~google.cloud.bigquery.job.ExtractJobConfig`
- :param job_config:
- (Optional) Extra configuration options for the extract job.
+ job_config (google.cloud.bigquery.job.ExtractJobConfig):
+ (Optional) Extra configuration options for the extract job.
 """

 _JOB_TYPE = "extract"
@@ -1974,11 +1976,12 @@ def destination_uri_file_counts(self):
 https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics4.FIELDS.destination_uri_file_counts

 Returns:
- a list of integer counts, each representing the number of files
- per destination URI or URI pattern specified in the extract
- configuration. These values will be in the same order as the URIs
- specified in the 'destinationUris' field. Returns None if job is
- not yet complete.
+ Optional[List[int]]:
+ A list of integer counts, each representing the number of files
+ per destination URI or URI pattern specified in the extract
+ configuration. These values will be in the same order as the URIs
+ specified in the 'destinationUris' field. Returns None if job is
+ not yet complete.
 """
 counts = self._job_statistics().get("destinationUriFileCounts")
 if counts is not None:
@@ -2018,15 +2021,15 @@ def from_api_repr(cls, resource, client):
 This method assumes that the project found in the resource matches the
 client's project.
- :type resource: dict - :param resource: dataset job representation returned from the API + Args: + resource (Dict): dataset job representation returned from the API - :type client: :class:`google.cloud.bigquery.client.Client` - :param client: Client which holds credentials and project - configuration for the dataset. + client (google.cloud.bigquery.client.Client): + Client which holds credentials and project + configuration for the dataset. - :rtype: :class:`google.cloud.bigquery.job.ExtractJob` - :returns: Job parsed from ``resource``. + Returns: + google.cloud.bigquery.job.ExtractJob: Job parsed from ``resource``. """ job_id, config_resource = cls._get_resource_config(resource) config = ExtractJobConfig.from_api_repr(config_resource) @@ -2425,7 +2428,7 @@ def time_partitioning(self, value): @property def clustering_fields(self): - """Union[List[str], None]: Fields defining clustering for the table + """Optional[List[str]]: Fields defining clustering for the table (Defaults to :data:`None`). @@ -2442,7 +2445,7 @@ def clustering_fields(self): @clustering_fields.setter def clustering_fields(self, value): - """Union[List[str], None]: Fields defining clustering for the table + """Optional[List[str]]: Fields defining clustering for the table (Defaults to :data:`None`). """ @@ -2467,7 +2470,7 @@ def to_api_repr(self): """Build an API representation of the query job config. Returns: - dict: A dictionary in the format used by the BigQuery API. + Dict: A dictionary in the format used by the BigQuery API. """ resource = copy.deepcopy(self._properties) @@ -2486,19 +2489,17 @@ def to_api_repr(self): class QueryJob(_AsyncJob): """Asynchronous job: query tables. - :type job_id: str - :param job_id: the job's ID, within the project belonging to ``client``. + Args: + job_id (str): the job's ID, within the project belonging to ``client``. - :type query: str - :param query: SQL query string + query (str): SQL query string. - :type client: :class:`google.cloud.bigquery.client.Client` - :param client: A client which holds credentials and project configuration - for the dataset (which requires a project). + client (google.cloud.bigquery.client.Client): + A client which holds credentials and project configuration + for the dataset (which requires a project). - :type job_config: :class:`~google.cloud.bigquery.job.QueryJobConfig` - :param job_config: - (Optional) Extra configuration options for the query job. + job_config (google.cloud.bigquery.job.QueryJobConfig): + (Optional) Extra configuration options for the query job. """ _JOB_TYPE = "query" @@ -2697,15 +2698,15 @@ def _copy_configuration_properties(self, configuration): def from_api_repr(cls, resource, client): """Factory: construct a job given its API representation - :type resource: dict - :param resource: dataset job representation returned from the API + Args: + resource (Dict): dataset job representation returned from the API - :type client: :class:`google.cloud.bigquery.client.Client` - :param client: Client which holds credentials and project - configuration for the dataset. + client (google.cloud.bigquery.client.Client): + Client which holds credentials and project + configuration for the dataset. - :rtype: :class:`google.cloud.bigquery.job.QueryJob` - :returns: Job parsed from ``resource``. + Returns: + google.cloud.bigquery.job.QueryJob: Job parsed from ``resource``. 
""" job_id, config = cls._get_resource_config(resource) query = _helpers._get_sub_prop(config, ["query", "query"]) @@ -2720,9 +2721,10 @@ def query_plan(self): See: https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.query_plan - :rtype: list of :class:`QueryPlanEntry` - :returns: mappings describing the query plan, or an empty list - if the query has not yet completed. + Returns: + List[QueryPlanEntry]: + mappings describing the query plan, or an empty list + if the query has not yet completed. """ plan_entries = self._job_statistics().get("queryPlan", ()) return [QueryPlanEntry.from_api_repr(entry) for entry in plan_entries] @@ -2742,9 +2744,10 @@ def total_bytes_processed(self): See: https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.total_bytes_processed - :rtype: int or None - :returns: total bytes processed by the job, or None if job is not - yet complete. + Returns: + Optional[int]: + Total bytes processed by the job, or None if job is not + yet complete. """ result = self._job_statistics().get("totalBytesProcessed") if result is not None: @@ -2758,9 +2761,10 @@ def total_bytes_billed(self): See: https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.total_bytes_billed - :rtype: int or None - :returns: total bytes processed by the job, or None if job is not - yet complete. + Returns: + Optional[int]: + Total bytes processed by the job, or None if job is not + yet complete. """ result = self._job_statistics().get("totalBytesBilled") if result is not None: @@ -2774,9 +2778,10 @@ def billing_tier(self): See: https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.billing_tier - :rtype: int or None - :returns: billing tier used by the job, or None if job is not - yet complete. + Returns: + Optional[int]: + Billing tier used by the job, or None if job is not + yet complete. """ return self._job_statistics().get("billingTier") @@ -2787,9 +2792,10 @@ def cache_hit(self): See: https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.cache_hit - :rtype: bool or None - :returns: whether the query results were returned from cache, or None - if job is not yet complete. + Returns: + Optional[bool]: + whether the query results were returned from cache, or None + if job is not yet complete. """ return self._job_statistics().get("cacheHit") @@ -2836,9 +2842,10 @@ def num_dml_affected_rows(self): See: https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.num_dml_affected_rows - :rtype: int or None - :returns: number of DML rows affected by the job, or None if job is not - yet complete. + Returns: + Optional[int]: + number of DML rows affected by the job, or None if job is not + yet complete. """ result = self._job_statistics().get("numDmlAffectedRows") if result is not None: @@ -2857,9 +2864,10 @@ def statement_type(self): See: https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.statement_type - :rtype: str or None - :returns: type of statement used by the job, or None if job is not - yet complete. + Returns: + Optional[str]: + type of statement used by the job, or None if job is not + yet complete. 
""" return self._job_statistics().get("statementType") @@ -2870,9 +2878,10 @@ def referenced_tables(self): See: https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.referenced_tables - :rtype: list of dict - :returns: mappings describing the query plan, or an empty list - if the query has not yet completed. + Returns: + List[Dict]: + mappings describing the query plan, or an empty list + if the query has not yet completed. """ tables = [] datasets_by_project_name = {} @@ -2899,13 +2908,14 @@ def undeclared_query_parameters(self): See: https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.undeclared_query_parameters - :rtype: - list of - :class:`~google.cloud.bigquery.ArrayQueryParameter`, - :class:`~google.cloud.bigquery.ScalarQueryParameter`, or - :class:`~google.cloud.bigquery.StructQueryParameter` - :returns: undeclared parameters, or an empty list if the query has - not yet completed. + Returns: + List[Union[ \ + google.cloud.bigquery.query.ArrayQueryParameter, \ + google.cloud.bigquery.query.ScalarQueryParameter, \ + google.cloud.bigquery.query.StructQueryParameter \ + ]]: + Undeclared parameters, or an empty list if the query has + not yet completed. """ parameters = [] undeclared = self._job_statistics().get("undeclaredQueryParameters", ()) @@ -2931,9 +2941,10 @@ def estimated_bytes_processed(self): See: https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.estimated_bytes_processed - :rtype: int or None - :returns: number of DML rows affected by the job, or None if job is not - yet complete. + Returns: + Optional[int]: + number of DML rows affected by the job, or None if job is not + yet complete. """ result = self._job_statistics().get("estimatedBytesProcessed") if result is not None: @@ -2943,8 +2954,8 @@ def estimated_bytes_processed(self): def done(self, retry=DEFAULT_RETRY): """Refresh the job and checks if it is complete. - :rtype: bool - :returns: True if the job is complete, False otherwise. + Returns: + bool: True if the job is complete, False otherwise. """ # Since the API to getQueryResults can hang up to the timeout value # (default of 10 seconds), set the timeout parameter to ensure that @@ -2990,8 +3001,8 @@ def _format_for_exception(query, job_id): query (str): The SQL query to format. job_id (str): The ID of the job that ran the query. - Returns: (str) - A formatted query text. + Returns: + str: A formatted query text. """ template = "\n\n(job ID: {job_id})\n\n{header}\n\n{ruler}\n{body}\n{ruler}" @@ -3026,8 +3037,7 @@ def _begin(self, client=None, retry=DEFAULT_RETRY): How to retry the RPC. Raises: - ValueError: - If the job has already begun. + ValueError: If the job has already begun. """ try: @@ -3122,9 +3132,7 @@ def to_arrow(self, progress_bar_type=None, bqstorage_client=None): ``'tqdm_gui'`` Use the :func:`tqdm.tqdm_gui` function to display a progress bar as a graphical dialog box. - bqstorage_client ( \ - google.cloud.bigquery_storage_v1beta1.BigQueryStorageClient \ - ): + bqstorage_client (google.cloud.bigquery_storage_v1beta1.BigQueryStorageClient): **Beta Feature** Optional. A BigQuery Storage API client. If supplied, use the faster BigQuery Storage API to fetch rows from BigQuery. This API is a billable API. 
@@ -3157,9 +3165,7 @@ def to_dataframe(self, bqstorage_client=None, dtypes=None, progress_bar_type=Non """Return a pandas DataFrame from a QueryJob Args: - bqstorage_client ( \ - google.cloud.bigquery_storage_v1beta1.BigQueryStorageClient \ - ): + bqstorage_client (google.cloud.bigquery_storage_v1beta1.BigQueryStorageClient): **Alpha Feature** Optional. A BigQuery Storage API client. If supplied, use the faster BigQuery Storage API to fetch rows from BigQuery. This API is a billable API. @@ -3173,9 +3179,7 @@ def to_dataframe(self, bqstorage_client=None, dtypes=None, progress_bar_type=Non **Caution**: There is a known issue reading small anonymous query result tables with the BQ Storage API. Write your query results to a destination table to work around this issue. - dtypes ( \ - Map[str, Union[str, pandas.Series.dtype]] \ - ): + dtypes (Map[str, Union[str, pandas.Series.dtype]]): Optional. A dictionary of column names pandas ``dtype``s. The provided ``dtype`` is used when constructing the series for the column specified. Otherwise, the default pandas behavior @@ -3212,11 +3216,10 @@ def __iter__(self): class QueryPlanEntryStep(object): """Map a single step in a query plan entry. - :type kind: str - :param kind: step type + Args: + kind (str): step type. - :type substeps: - :param substeps: names of substeps + substeps (List): names of substeps. """ def __init__(self, kind, substeps): @@ -3227,11 +3230,11 @@ def __init__(self, kind, substeps): def from_api_repr(cls, resource): """Factory: construct instance from the JSON repr. - :type resource: dict - :param resource: JSON representation of the entry + Args: + resource (Dict): JSON representation of the entry. - :rtype: :class:`QueryPlanEntryStep` - :return: new instance built from the resource + Returns: + QueryPlanEntryStep: new instance built from the resource. """ return cls(kind=resource.get("kind"), substeps=resource.get("substeps", ())) @@ -3247,7 +3250,6 @@ class QueryPlanEntry(object): See https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#ExplainQueryStage for the underlying API representation within query statistics. - """ def __init__(self): @@ -3259,11 +3261,11 @@ def from_api_repr(cls, resource): Args: resource(Dict[str: object]): - ExplainQueryStage representation returned from API + ExplainQueryStage representation returned from API. Returns: google.cloud.bigquery.QueryPlanEntry: - Query plan entry parsed from ``resource`` + Query plan entry parsed from ``resource``. 
""" entry = cls() entry._properties = resource @@ -3271,17 +3273,17 @@ def from_api_repr(cls, resource): @property def name(self): - """Union[str, None]: Human-readable name of the stage.""" + """Optional[str]: Human-readable name of the stage.""" return self._properties.get("name") @property def entry_id(self): - """Union[str, None]: Unique ID for the stage within the plan.""" + """Optional[str]: Unique ID for the stage within the plan.""" return self._properties.get("id") @property def start(self): - """Union[Datetime, None]: Datetime when the stage started.""" + """Optional[Datetime]: Datetime when the stage started.""" if self._properties.get("startMs") is None: return None return _helpers._datetime_from_microseconds( @@ -3290,7 +3292,7 @@ def start(self): @property def end(self): - """Union[Datetime, None]: Datetime when the stage ended.""" + """Optional[Datetime]: Datetime when the stage ended.""" if self._properties.get("endMs") is None: return None return _helpers._datetime_from_microseconds( @@ -3309,33 +3311,33 @@ def input_stages(self): @property def parallel_inputs(self): - """Union[int, None]: Number of parallel input segments within + """Optional[int]: Number of parallel input segments within the stage. """ return _helpers._int_or_none(self._properties.get("parallelInputs")) @property def completed_parallel_inputs(self): - """Union[int, None]: Number of parallel input segments completed.""" + """Optional[int]: Number of parallel input segments completed.""" return _helpers._int_or_none(self._properties.get("completedParallelInputs")) @property def wait_ms_avg(self): - """Union[int, None]: Milliseconds the average worker spent waiting to + """Optional[int]: Milliseconds the average worker spent waiting to be scheduled. """ return _helpers._int_or_none(self._properties.get("waitMsAvg")) @property def wait_ms_max(self): - """Union[int, None]: Milliseconds the slowest worker spent waiting to + """Optional[int]: Milliseconds the slowest worker spent waiting to be scheduled. """ return _helpers._int_or_none(self._properties.get("waitMsMax")) @property def wait_ratio_avg(self): - """Union[float, None]: Ratio of time the average worker spent waiting + """Optional[float]: Ratio of time the average worker spent waiting to be scheduled, relative to the longest time spent by any worker in any stage of the overall plan. """ @@ -3343,7 +3345,7 @@ def wait_ratio_avg(self): @property def wait_ratio_max(self): - """Union[float, None]: Ratio of time the slowest worker spent waiting + """Optional[float]: Ratio of time the slowest worker spent waiting to be scheduled, relative to the longest time spent by any worker in any stage of the overall plan. """ @@ -3351,21 +3353,21 @@ def wait_ratio_max(self): @property def read_ms_avg(self): - """Union[int, None]: Milliseconds the average worker spent reading + """Optional[int]: Milliseconds the average worker spent reading input. """ return _helpers._int_or_none(self._properties.get("readMsAvg")) @property def read_ms_max(self): - """Union[int, None]: Milliseconds the slowest worker spent reading + """Optional[int]: Milliseconds the slowest worker spent reading input. """ return _helpers._int_or_none(self._properties.get("readMsMax")) @property def read_ratio_avg(self): - """Union[float, None]: Ratio of time the average worker spent reading + """Optional[float]: Ratio of time the average worker spent reading input, relative to the longest time spent by any worker in any stage of the overall plan. 
""" @@ -3373,7 +3375,7 @@ def read_ratio_avg(self): @property def read_ratio_max(self): - """Union[float, None]: Ratio of time the slowest worker spent reading + """Optional[float]: Ratio of time the slowest worker spent reading to be scheduled, relative to the longest time spent by any worker in any stage of the overall plan. """ @@ -3381,21 +3383,21 @@ def read_ratio_max(self): @property def compute_ms_avg(self): - """Union[int, None]: Milliseconds the average worker spent on CPU-bound + """Optional[int]: Milliseconds the average worker spent on CPU-bound processing. """ return _helpers._int_or_none(self._properties.get("computeMsAvg")) @property def compute_ms_max(self): - """Union[int, None]: Milliseconds the slowest worker spent on CPU-bound + """Optional[int]: Milliseconds the slowest worker spent on CPU-bound processing. """ return _helpers._int_or_none(self._properties.get("computeMsMax")) @property def compute_ratio_avg(self): - """Union[float, None]: Ratio of time the average worker spent on + """Optional[float]: Ratio of time the average worker spent on CPU-bound processing, relative to the longest time spent by any worker in any stage of the overall plan. """ @@ -3403,7 +3405,7 @@ def compute_ratio_avg(self): @property def compute_ratio_max(self): - """Union[float, None]: Ratio of time the slowest worker spent on + """Optional[float]: Ratio of time the slowest worker spent on CPU-bound processing, relative to the longest time spent by any worker in any stage of the overall plan. """ @@ -3411,21 +3413,21 @@ def compute_ratio_max(self): @property def write_ms_avg(self): - """Union[int, None]: Milliseconds the average worker spent writing + """Optional[int]: Milliseconds the average worker spent writing output data. """ return _helpers._int_or_none(self._properties.get("writeMsAvg")) @property def write_ms_max(self): - """Union[int, None]: Milliseconds the slowest worker spent writing + """Optional[int]: Milliseconds the slowest worker spent writing output data. """ return _helpers._int_or_none(self._properties.get("writeMsMax")) @property def write_ratio_avg(self): - """Union[float, None]: Ratio of time the average worker spent writing + """Optional[float]: Ratio of time the average worker spent writing output data, relative to the longest time spent by any worker in any stage of the overall plan. """ @@ -3433,7 +3435,7 @@ def write_ratio_avg(self): @property def write_ratio_max(self): - """Union[float, None]: Ratio of time the slowest worker spent writing + """Optional[float]: Ratio of time the slowest worker spent writing output data, relative to the longest time spent by any worker in any stage of the overall plan. """ @@ -3441,29 +3443,29 @@ def write_ratio_max(self): @property def records_read(self): - """Union[int, None]: Number of records read by this stage.""" + """Optional[int]: Number of records read by this stage.""" return _helpers._int_or_none(self._properties.get("recordsRead")) @property def records_written(self): - """Union[int, None]: Number of records written by this stage.""" + """Optional[int]: Number of records written by this stage.""" return _helpers._int_or_none(self._properties.get("recordsWritten")) @property def status(self): - """Union[str, None]: status of this stage.""" + """Optional[str]: status of this stage.""" return self._properties.get("status") @property def shuffle_output_bytes(self): - """Union[int, None]: Number of bytes written by this stage to + """Optional[int]: Number of bytes written by this stage to intermediate shuffle. 
""" return _helpers._int_or_none(self._properties.get("shuffleOutputBytes")) @property def shuffle_output_bytes_spilled(self): - """Union[int, None]: Number of bytes written by this stage to + """Optional[int]: Number of bytes written by this stage to intermediate shuffle and spilled to disk. """ return _helpers._int_or_none(self._properties.get("shuffleOutputBytesSpilled")) @@ -3486,7 +3488,6 @@ class TimelineEntry(object): See https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#querytimelinesample for the underlying API representation within query statistics. - """ def __init__(self): @@ -3498,11 +3499,11 @@ def from_api_repr(cls, resource): Args: resource(Dict[str: object]): - QueryTimelineSample representation returned from API + QueryTimelineSample representation returned from API. Returns: google.cloud.bigquery.TimelineEntry: - Timeline sample parsed from ``resource`` + Timeline sample parsed from ``resource``. """ entry = cls() entry._properties = resource @@ -3510,31 +3511,31 @@ def from_api_repr(cls, resource): @property def elapsed_ms(self): - """Union[int, None]: Milliseconds elapsed since start of query + """Optional[int]: Milliseconds elapsed since start of query execution.""" return _helpers._int_or_none(self._properties.get("elapsedMs")) @property def active_units(self): - """Union[int, None]: Current number of input units being processed + """Optional[int]: Current number of input units being processed by workers, reported as largest value since the last sample.""" return _helpers._int_or_none(self._properties.get("activeUnits")) @property def pending_units(self): - """Union[int, None]: Current number of input units remaining for + """Optional[int]: Current number of input units remaining for query stages active at this sample time.""" return _helpers._int_or_none(self._properties.get("pendingUnits")) @property def completed_units(self): - """Union[int, None]: Current number of input units completed by + """Optional[int]: Current number of input units completed by this query.""" return _helpers._int_or_none(self._properties.get("completedUnits")) @property def slot_millis(self): - """Union[int, None]: Cumulative slot-milliseconds consumed by + """Optional[int]: Cumulative slot-milliseconds consumed by this query.""" return _helpers._int_or_none(self._properties.get("totalSlotMs")) @@ -3547,7 +3548,7 @@ def from_api_repr(cls, resource, client): """Construct an UnknownJob from the JSON representation. Args: - resource (dict): JSON representation of a job. + resource (Dict): JSON representation of a job. client (google.cloud.bigquery.client.Client): Client connected to BigQuery API. @@ -3569,8 +3570,7 @@ class ScriptStackFrame(object): evaluation happened. Args: - resource (Map[str, Any]): - JSON representation of object. + resource (Map[str, Any]): JSON representation of object. """ def __init__(self, resource): @@ -3614,8 +3614,7 @@ class ScriptStatistics(object): """Statistics for a child job of a script. Args: - resource (Map[str, Any]): - JSON representation of object. + resource (Map[str, Any]): JSON representation of object. """ def __init__(self, resource): diff --git a/bigquery/google/cloud/bigquery/query.py b/bigquery/google/cloud/bigquery/query.py index 59fcd1a59c64..925f3e29d298 100644 --- a/bigquery/google/cloud/bigquery/query.py +++ b/bigquery/google/cloud/bigquery/query.py @@ -67,7 +67,8 @@ def from_api_repr(cls, resource): def to_api_repr(self): """Construct JSON API representation for the parameter. 
- :rtype: dict + Returns: + Dict: JSON representation for the parameter. """ raise NotImplementedError diff --git a/bigquery/google/cloud/bigquery/table.py b/bigquery/google/cloud/bigquery/table.py index 72ff8f71385c..7e36c582c42b 100644 --- a/bigquery/google/cloud/bigquery/table.py +++ b/bigquery/google/cloud/bigquery/table.py @@ -1292,9 +1292,9 @@ class RowIterator(HTTPIterator): Defaults to a sensible value set by the API. extra_params (Dict[str, object]): Extra query string parameters for the API call. - table (Union[ - google.cloud.bigquery.table.Table, - google.cloud.bigquery.table.TableReference, + table (Union[ \ + google.cloud.bigquery.table.Table, \ + google.cloud.bigquery.table.TableReference, \ ]): Optional. The table which these rows belong to, or a reference to it. Used to call the BigQuery Storage API to fetch rows. @@ -1463,9 +1463,7 @@ def to_arrow(self, progress_bar_type=None, bqstorage_client=None): ``'tqdm_gui'`` Use the :func:`tqdm.tqdm_gui` function to display a progress bar as a graphical dialog box. - bqstorage_client ( - google.cloud.bigquery_storage_v1beta1.BigQueryStorageClient - ): + bqstorage_client (google.cloud.bigquery_storage_v1beta1.BigQueryStorageClient): **Beta Feature** Optional. A BigQuery Storage API client. If supplied, use the faster BigQuery Storage API to fetch rows from BigQuery. This API is a billable API.
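A closing sketch of the ``RowIterator.to_arrow`` path documented above (illustrative only; assumes the ``pyarrow`` and ``google-cloud-bigquery-storage`` packages are installed and the client is authenticated; the public sample table is used purely as an example):

    from google.cloud import bigquery
    from google.cloud import bigquery_storage_v1beta1

    client = bigquery.Client()
    bqstorage_client = bigquery_storage_v1beta1.BigQueryStorageClient()

    table = client.get_table("bigquery-public-data.samples.shakespeare")
    rows = client.list_rows(table)  # returns a RowIterator
    # With a BQ Storage client supplied, rows are fetched over the faster
    # (billable) BigQuery Storage API where possible.
    arrow_table = rows.to_arrow(bqstorage_client=bqstorage_client)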