docs: Add documentation for enums (#166)
* docs: Add documentation for enums

fix: Add context manager return types

chore: Update gapic-generator-python to v1.8.1
PiperOrigin-RevId: 503210727

Source-Link: googleapis/googleapis@a391fd1

Source-Link: googleapis/googleapis-gen@0080f83
Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMDA4MGY4MzBkZWMzN2MzMzg0MTU3MDgyYmNlMjc5ZTM3MDc5ZWE1OCJ9

* 🦉 Updates from OwlBot post-processor

See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md

Co-authored-by: Owl Bot <gcf-owl-bot[bot]@users.noreply.github.com>
gcf-owl-bot[bot] authored Jan 20, 2023
1 parent 7e9a65b commit 09e8204
Showing 13 changed files with 391 additions and 16 deletions.
@@ -507,7 +507,7 @@ def sample_launch_flex_template():
# Done; return the response.
return response

def __enter__(self):
def __enter__(self) -> "FlexTemplatesServiceClient":
return self

def __exit__(self, type, value, traceback):
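For context, the annotated `__enter__` lets type checkers infer the client type inside a `with` block; the same annotation is applied to the other service clients changed below. A minimal sketch (not part of this commit), assuming the usual GAPIC re-exports under `google.cloud.dataflow_v1beta3` and a hypothetical project ID:

```python
# Sketch only (not part of this commit): with the typed __enter__, static
# checkers know `client` is a FlexTemplatesServiceClient inside the block.
from google.cloud import dataflow_v1beta3

with dataflow_v1beta3.FlexTemplatesServiceClient() as client:
    request = dataflow_v1beta3.LaunchFlexTemplateRequest(
        project_id="my-project",  # hypothetical project ID
        location="us-central1",
    )
    response = client.launch_flex_template(request=request)
    print(response)
# On leaving the block, __exit__ closes the underlying transport.
```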
@@ -1071,7 +1071,7 @@ def sample_snapshot_job():
# Done; return the response.
return response

def __enter__(self):
def __enter__(self) -> "JobsV1Beta3Client":
return self

def __exit__(self, type, value, traceback):
@@ -533,7 +533,7 @@ def sample_list_job_messages():
# Done; return the response.
return response

def __enter__(self):
def __enter__(self) -> "MessagesV1Beta3Client":
return self

def __exit__(self, type, value, traceback):
@@ -738,7 +738,7 @@ def sample_get_stage_execution_details():
# Done; return the response.
return response

def __enter__(self):
def __enter__(self) -> "MetricsV1Beta3Client":
return self

def __exit__(self, type, value, traceback):
@@ -675,7 +675,7 @@ def sample_list_snapshots():
# Done; return the response.
return response

def __enter__(self):
def __enter__(self) -> "SnapshotsV1Beta3Client":
return self

def __exit__(self, type, value, traceback):
@@ -682,7 +682,7 @@ def sample_get_template():
# Done; return the response.
return response

def __enter__(self):
def __enter__(self) -> "TemplatesServiceClient":
return self

def __exit__(self, type, value, traceback):
@@ -47,6 +47,19 @@ class JobType(proto.Enum):
[google.dataflow.v1beta3.Job], which determines the way the Job is
managed by the Cloud Dataflow service (how workers are scheduled,
how inputs are sharded, etc).
Values:
JOB_TYPE_UNKNOWN (0):
The type of the job is unspecified, or
unknown.
JOB_TYPE_BATCH (1):
A batch job with a well-defined end point:
data is read, data is processed, data is
written, and the job is done.
JOB_TYPE_STREAMING (2):
A continuously streaming job with no end:
data is read, processed, and written
continuously.
"""
JOB_TYPE_UNKNOWN = 0
JOB_TYPE_BATCH = 1
@@ -56,6 +69,14 @@ class JobType(proto.Enum):
class FlexResourceSchedulingGoal(proto.Enum):
r"""Specifies the resource to optimize for in Flexible Resource
Scheduling.
Values:
FLEXRS_UNSPECIFIED (0):
Run in the default mode.
FLEXRS_SPEED_OPTIMIZED (1):
Optimize for lower execution time.
FLEXRS_COST_OPTIMIZED (2):
Optimize for lower cost.
"""
FLEXRS_UNSPECIFIED = 0
FLEXRS_SPEED_OPTIMIZED = 1
@@ -66,6 +87,19 @@ class TeardownPolicy(proto.Enum):
r"""Specifies what happens to a resource when a Cloud Dataflow
[google.dataflow.v1beta3.Job][google.dataflow.v1beta3.Job] has
completed.
Values:
TEARDOWN_POLICY_UNKNOWN (0):
The teardown policy isn't specified, or is
unknown.
TEARDOWN_ALWAYS (1):
Always teardown the resource.
TEARDOWN_ON_SUCCESS (2):
Teardown the resource on success. This is
useful for debugging failures.
TEARDOWN_NEVER (3):
Never teardown the resource. This is useful
for debugging and development.
"""
TEARDOWN_POLICY_UNKNOWN = 0
TEARDOWN_ALWAYS = 1
@@ -76,6 +110,21 @@ class TeardownPolicy(proto.Enum):
class DefaultPackageSet(proto.Enum):
r"""The default set of packages to be staged on a pool of
workers.
Values:
DEFAULT_PACKAGE_SET_UNKNOWN (0):
The default set of packages to stage is
unknown, or unspecified.
DEFAULT_PACKAGE_SET_NONE (1):
Indicates that no packages should be staged
at the worker unless explicitly specified by the
job.
DEFAULT_PACKAGE_SET_JAVA (2):
Stage packages typically useful to workers
written in Java.
DEFAULT_PACKAGE_SET_PYTHON (3):
Stage packages typically useful to workers
written in Python.
"""
DEFAULT_PACKAGE_SET_UNKNOWN = 0
DEFAULT_PACKAGE_SET_NONE = 1
@@ -88,6 +137,15 @@ class AutoscalingAlgorithm(proto.Enum):
worker processes to run at any given point in time, based on the
amount of data left to process, the number of workers, and how
quickly existing workers are processing data.
Values:
AUTOSCALING_ALGORITHM_UNKNOWN (0):
The algorithm is unknown, or unspecified.
AUTOSCALING_ALGORITHM_NONE (1):
Disable autoscaling.
AUTOSCALING_ALGORITHM_BASIC (2):
Increase worker count over time to reduce job
execution time.
"""
AUTOSCALING_ALGORITHM_UNKNOWN = 0
AUTOSCALING_ALGORITHM_NONE = 1
@@ -97,6 +155,14 @@ class AutoscalingAlgorithm(proto.Enum):
class WorkerIPAddressConfiguration(proto.Enum):
r"""Specifies how IP addresses should be allocated to the worker
machines.
Values:
WORKER_IP_UNSPECIFIED (0):
The configuration is unknown, or unspecified.
WORKER_IP_PUBLIC (1):
Workers should have public IP addresses.
WORKER_IP_PRIVATE (2):
Workers should have private IP addresses.
"""
WORKER_IP_UNSPECIFIED = 0
WORKER_IP_PUBLIC = 1
@@ -108,6 +174,14 @@ class ShuffleMode(proto.Enum):
which determines the approach data is shuffled during processing.
More details in:
https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#dataflow-shuffle
Values:
SHUFFLE_MODE_UNSPECIFIED (0):
Shuffle mode information is not available.
VM_BASED (1):
Shuffle is done on the worker VMs.
SERVICE_BASED (2):
Shuffle is done on the service side.
"""
SHUFFLE_MODE_UNSPECIFIED = 0
VM_BASED = 1
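The enums documented above are plain `proto.Enum` members, so they behave like `IntEnum` values. A small sketch (not part of the diff) of how they are typically referenced, assuming the standard exports from `google.cloud.dataflow_v1beta3` and the v1beta3 `Environment`/`WorkerPool` field names:

```python
# Sketch only: constructing an Environment message with the documented enums.
from google.cloud import dataflow_v1beta3

env = dataflow_v1beta3.Environment(
    flex_resource_scheduling_goal=(
        dataflow_v1beta3.FlexResourceSchedulingGoal.FLEXRS_COST_OPTIMIZED
    ),
    worker_pools=[
        dataflow_v1beta3.WorkerPool(
            teardown_policy=dataflow_v1beta3.TeardownPolicy.TEARDOWN_ON_SUCCESS,
            default_package_set=dataflow_v1beta3.DefaultPackageSet.DEFAULT_PACKAGE_SET_PYTHON,
            autoscaling_settings=dataflow_v1beta3.AutoscalingSettings(
                algorithm=dataflow_v1beta3.AutoscalingAlgorithm.AUTOSCALING_ALGORITHM_BASIC,
            ),
            ip_configuration=dataflow_v1beta3.WorkerIPAddressConfiguration.WORKER_IP_PRIVATE,
        )
    ],
)

# Enum members expose both the documented name and the integer value.
goal = env.flex_resource_scheduling_goal
print(goal.name, int(goal))  # FLEXRS_COST_OPTIMIZED 2
```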
@@ -59,7 +59,30 @@


class KindType(proto.Enum):
r"""Type of transform or stage operation."""
r"""Type of transform or stage operation.
Values:
UNKNOWN_KIND (0):
Unrecognized transform type.
PAR_DO_KIND (1):
ParDo transform.
GROUP_BY_KEY_KIND (2):
Group By Key transform.
FLATTEN_KIND (3):
Flatten transform.
READ_KIND (4):
Read transform.
WRITE_KIND (5):
Write transform.
CONSTANT_KIND (6):
Constructs from a constant value, such as
with Create.of.
SINGLETON_KIND (7):
Creates a Singleton view of a collection.
SHUFFLE_KIND (8):
Opening or closing a shuffle session, often
as part of a GroupByKey.
"""
UNKNOWN_KIND = 0
PAR_DO_KIND = 1
GROUP_BY_KEY_KIND = 2
@@ -74,6 +97,79 @@ class KindType(proto.Enum):
class JobState(proto.Enum):
r"""Describes the overall state of a
[google.dataflow.v1beta3.Job][google.dataflow.v1beta3.Job].
Values:
JOB_STATE_UNKNOWN (0):
The job's run state isn't specified.
JOB_STATE_STOPPED (1):
``JOB_STATE_STOPPED`` indicates that the job has not yet
started to run.
JOB_STATE_RUNNING (2):
``JOB_STATE_RUNNING`` indicates that the job is currently
running.
JOB_STATE_DONE (3):
``JOB_STATE_DONE`` indicates that the job has successfully
completed. This is a terminal job state. This state may be
set by the Cloud Dataflow service, as a transition from
``JOB_STATE_RUNNING``. It may also be set via a Cloud
Dataflow ``UpdateJob`` call, if the job has not yet reached
a terminal state.
JOB_STATE_FAILED (4):
``JOB_STATE_FAILED`` indicates that the job has failed. This
is a terminal job state. This state may only be set by the
Cloud Dataflow service, and only as a transition from
``JOB_STATE_RUNNING``.
JOB_STATE_CANCELLED (5):
``JOB_STATE_CANCELLED`` indicates that the job has been
explicitly cancelled. This is a terminal job state. This
state may only be set via a Cloud Dataflow ``UpdateJob``
call, and only if the job has not yet reached another
terminal state.
JOB_STATE_UPDATED (6):
``JOB_STATE_UPDATED`` indicates that the job was
successfully updated, meaning that this job was stopped and
another job was started, inheriting state from this one.
This is a terminal job state. This state may only be set by
the Cloud Dataflow service, and only as a transition from
``JOB_STATE_RUNNING``.
JOB_STATE_DRAINING (7):
``JOB_STATE_DRAINING`` indicates that the job is in the
process of draining. A draining job has stopped pulling from
its input sources and is processing any data that remains
in-flight. This state may be set via a Cloud Dataflow
``UpdateJob`` call, but only as a transition from
``JOB_STATE_RUNNING``. Jobs that are draining may only
transition to ``JOB_STATE_DRAINED``,
``JOB_STATE_CANCELLED``, or ``JOB_STATE_FAILED``.
JOB_STATE_DRAINED (8):
``JOB_STATE_DRAINED`` indicates that the job has been
drained. A drained job terminated by stopping pulling from
its input sources and processing any data that remained
in-flight when draining was requested. This state is a
terminal state, may only be set by the Cloud Dataflow
service, and only as a transition from
``JOB_STATE_DRAINING``.
JOB_STATE_PENDING (9):
``JOB_STATE_PENDING`` indicates that the job has been
created but is not yet running. Jobs that are pending may
only transition to ``JOB_STATE_RUNNING``, or
``JOB_STATE_FAILED``.
JOB_STATE_CANCELLING (10):
``JOB_STATE_CANCELLING`` indicates that the job has been
explicitly cancelled and is in the process of stopping. Jobs
that are cancelling may only transition to
``JOB_STATE_CANCELLED`` or ``JOB_STATE_FAILED``.
JOB_STATE_QUEUED (11):
``JOB_STATE_QUEUED`` indicates that the job has been created
but is being delayed until launch. Jobs that are queued may
only transition to ``JOB_STATE_PENDING`` or
``JOB_STATE_CANCELLED``.
JOB_STATE_RESOURCE_CLEANING_UP (12):
``JOB_STATE_RESOURCE_CLEANING_UP`` indicates that the batch
job's associated resources are currently being cleaned up
after a successful run. Currently, this is an opt-in
feature, please reach out to Cloud support team if you are
interested.
"""
JOB_STATE_UNKNOWN = 0
JOB_STATE_STOPPED = 1
@@ -93,6 +189,24 @@ class JobState(proto.Enum):
class JobView(proto.Enum):
r"""Selector for how much information is returned in Job
responses.
Values:
JOB_VIEW_UNKNOWN (0):
The job view to return isn't specified, or is unknown.
Responses will contain at least the ``JOB_VIEW_SUMMARY``
information, and may contain additional information.
JOB_VIEW_SUMMARY (1):
Request summary information only:
Project ID, Job ID, job name, job type, job
status, start/end time, and Cloud SDK version
details.
JOB_VIEW_ALL (2):
Request all information available for this
job.
JOB_VIEW_DESCRIPTION (3):
Request summary info and limited job
description data for steps, labels and
environment.
"""
JOB_VIEW_UNKNOWN = 0
JOB_VIEW_SUMMARY = 1
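As a usage note (not part of the diff), here is a sketch of fetching a job and testing for the terminal states described above, assuming the standard `google.cloud.dataflow_v1beta3` exports and hypothetical project/job IDs:

```python
# Sketch only: check whether a job has reached one of the terminal states
# documented above, requesting summary information via JobView.
from google.cloud import dataflow_v1beta3

TERMINAL_STATES = {
    dataflow_v1beta3.JobState.JOB_STATE_DONE,
    dataflow_v1beta3.JobState.JOB_STATE_FAILED,
    dataflow_v1beta3.JobState.JOB_STATE_CANCELLED,
    dataflow_v1beta3.JobState.JOB_STATE_UPDATED,
    dataflow_v1beta3.JobState.JOB_STATE_DRAINED,
}

client = dataflow_v1beta3.JobsV1Beta3Client()
job = client.get_job(
    request=dataflow_v1beta3.GetJobRequest(
        project_id="my-project",                 # hypothetical
        job_id="2023-01-20_00_00_00-123456789",  # hypothetical
        view=dataflow_v1beta3.JobView.JOB_VIEW_SUMMARY,
    )
)
if job.current_state in TERMINAL_STATES:
    print("Job finished:", job.current_state.name)
else:
    print("Job still active:", job.current_state.name)
```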
@@ -522,7 +636,24 @@ class SdkVersion(proto.Message):
"""

class SdkSupportStatus(proto.Enum):
r"""The support status of the SDK used to run the job."""
r"""The support status of the SDK used to run the job.
Values:
UNKNOWN (0):
Cloud Dataflow is unaware of this version.
SUPPORTED (1):
This is a known version of an SDK, and is
supported.
STALE (2):
A newer version of the SDK family exists, and
an update is recommended.
DEPRECATED (3):
This version of the SDK is deprecated and
will eventually be unsupported.
UNSUPPORTED (4):
Support for this SDK version has ended and it
should no longer be used.
"""
UNKNOWN = 0
SUPPORTED = 1
STALE = 2
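For illustration (not part of the diff), a small helper built on the statuses documented above; `SdkVersion.SdkSupportStatus` is assumed to be exported via `google.cloud.dataflow_v1beta3` as in other GAPIC libraries:

```python
# Sketch only: classify the SDK support status reported for a job.
from google.cloud import dataflow_v1beta3

SdkSupportStatus = dataflow_v1beta3.SdkVersion.SdkSupportStatus

def sdk_needs_attention(status: SdkSupportStatus) -> bool:
    """Return True when the documented status suggests upgrading the SDK."""
    return status in (
        SdkSupportStatus.STALE,
        SdkSupportStatus.DEPRECATED,
        SdkSupportStatus.UNSUPPORTED,
    )

assert sdk_needs_attention(SdkSupportStatus.DEPRECATED)
assert not sdk_needs_attention(SdkSupportStatus.SUPPORTED)
```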
@@ -1259,6 +1390,23 @@ class Filter(proto.Enum):
r"""This field filters out and returns jobs in the specified job
state. The order of data returned is determined by the filter
used, and is subject to change.
Values:
UNKNOWN (0):
The filter isn't specified, or is unknown. This returns all
jobs ordered on descending ``JobUuid``.
ALL (1):
Returns all running jobs first ordered on
creation timestamp, then returns all terminated
jobs ordered on the termination timestamp.
TERMINATED (2):
Filters the jobs that have a terminated state, ordered on
the termination timestamp. Example terminated states:
``JOB_STATE_STOPPED``, ``JOB_STATE_UPDATED``,
``JOB_STATE_DRAINED``, etc.
ACTIVE (3):
Filters the jobs that are running ordered on
the creation timestamp.
"""
UNKNOWN = 0
ALL = 1
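A final usage sketch (not part of the diff) for the `Filter` enum above, which is nested on `ListJobsRequest`; the project ID is hypothetical:

```python
# Sketch only: list currently running jobs using the ACTIVE filter.
from google.cloud import dataflow_v1beta3

client = dataflow_v1beta3.JobsV1Beta3Client()
request = dataflow_v1beta3.ListJobsRequest(
    project_id="my-project",  # hypothetical
    filter=dataflow_v1beta3.ListJobsRequest.Filter.ACTIVE,
)
for job in client.list_jobs(request=request):
    print(job.id, job.name, job.current_state.name)
```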
(Remaining changed files not shown.)