diff --git a/.coveragerc b/.coveragerc index ae05ce469e67..280c5674f5bd 100644 --- a/.coveragerc +++ b/.coveragerc @@ -1,7 +1,6 @@ [report] omit = */_generated/*.py - */_generated_v2/*.py show_missing = True exclude_lines = # Re-enable the standard pragma diff --git a/Makefile.bigtable_v1 b/Makefile.bigtable_v1 deleted file mode 100644 index d6e6a247a06f..000000000000 --- a/Makefile.bigtable_v1 +++ /dev/null @@ -1,81 +0,0 @@ -GRPCIO_VIRTUALENV=$(shell pwd)/grpc_python_venv -GENERATED_DIR=$(shell pwd)/generated_python -GENERATED_SUBDIR=_generated -BIGTABLE_DIR=$(shell pwd)/gcloud/bigtable/$(GENERATED_SUBDIR) -PROTOC_CMD=$(GRPCIO_VIRTUALENV)/bin/python -m grpc.tools.protoc -GOOGLEAPIS_PROTOS_DIR=$(shell pwd)/googleapis-pb -BIGTABLE_CHECKOUT_DIR=$(shell pwd)/cloud-bigtable-client -BIGTABLE_PROTOS_DIR=$(BIGTABLE_CHECKOUT_DIR)/bigtable-client-core-parent/bigtable-protos/src/main/proto - -help: - @echo 'Makefile for gcloud-python Bigtable protos ' - @echo ' ' - @echo ' make generate Generates the protobuf modules ' - @echo ' make check_generate Checks that generate succeeded ' - @echo ' make clean Clean generated files ' - -generate: - # Ensure we have a virtualenv w/ up-to-date grpcio/grpcio-tools - [ -d $(GRPCIO_VIRTUALENV) ] || python2.7 -m virtualenv $(GRPCIO_VIRTUALENV) - $(GRPCIO_VIRTUALENV)/bin/pip install --upgrade grpcio grpcio-tools - # Retrieve git repos that have our *.proto files. - [ -d $(BIGTABLE_CHECKOUT_DIR) ] || git clone https://github.com/GoogleCloudPlatform/cloud-bigtable-client --depth=1 - cd $(BIGTABLE_CHECKOUT_DIR) && git pull origin master - [ -d $(GOOGLEAPIS_PROTOS_DIR) ] || git clone https://github.com/google/googleapis googleapis-pb --depth=1 - cd $(GOOGLEAPIS_PROTOS_DIR) && git pull origin master - # Make the directory where our *_pb2.py files will go. - mkdir -p $(GENERATED_DIR) - # Generate all *_pb2.py files that require gRPC. - $(PROTOC_CMD) \ - --proto_path=$(BIGTABLE_PROTOS_DIR) \ - --proto_path=$(GOOGLEAPIS_PROTOS_DIR) \ - --python_out=$(GENERATED_DIR) \ - --grpc_python_out=$(GENERATED_DIR) \ - $(BIGTABLE_PROTOS_DIR)/google/bigtable/v1/bigtable_service.proto \ - $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/cluster/v1/bigtable_cluster_service.proto \ - $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/table/v1/bigtable_table_service.proto - # Generate all *_pb2.py files that do not require gRPC. - $(PROTOC_CMD) \ - --proto_path=$(BIGTABLE_PROTOS_DIR) \ - --proto_path=$(GOOGLEAPIS_PROTOS_DIR) \ - --python_out=$(GENERATED_DIR) \ - $(BIGTABLE_PROTOS_DIR)/google/bigtable/v1/bigtable_data.proto \ - $(BIGTABLE_PROTOS_DIR)/google/bigtable/v1/bigtable_service_messages.proto \ - $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto \ - $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto \ - $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/table/v1/bigtable_table_data.proto \ - $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/table/v1/bigtable_table_service_messages.proto \ - # Move the newly generated *_pb2.py files into our library. - cp $(GENERATED_DIR)/google/bigtable/v1/* $(BIGTABLE_DIR) - cp $(GENERATED_DIR)/google/bigtable/admin/cluster/v1/* $(BIGTABLE_DIR) - cp $(GENERATED_DIR)/google/bigtable/admin/table/v1/* $(BIGTABLE_DIR) - # Remove all existing *.proto files before we replace - rm -f $(BIGTABLE_DIR)/*.proto - # Copy over the *.proto files into our library. 
- cp $(BIGTABLE_PROTOS_DIR)/google/bigtable/v1/*.proto $(BIGTABLE_DIR) - cp $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/cluster/v1/*.proto $(BIGTABLE_DIR) - cp $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/table/v1/*.proto $(BIGTABLE_DIR) - cp $(GOOGLEAPIS_PROTOS_DIR)/google/longrunning/operations.proto $(BIGTABLE_DIR) - # Rename all *.proto files in our library with an - # underscore and remove executable bit. - cd $(BIGTABLE_DIR) && \ - for filename in *.proto; do \ - chmod -x $$filename ; \ - mv $$filename _$$filename ; \ - done - # Separate the gRPC parts of the operations service from the - # non-gRPC parts so that the protos from `googleapis-common-protos` - # can be used without gRPC. - GRPCIO_VIRTUALENV="$(GRPCIO_VIRTUALENV)" \ - GENERATED_SUBDIR=$(GENERATED_SUBDIR) \ - python scripts/make_operations_grpc.py - # Rewrite the imports in the generated *_pb2.py files. - python scripts/rewrite_imports.py $(BIGTABLE_DIR)/*pb2.py - -check_generate: - python scripts/check_generate.py - -clean: - rm -fr $(GRPCIO_VIRTUALENV) $(GOOGLEAPIS_PROTOS_DIR) $(GENERATED_DIR) - -.PHONY: generate check_generate clean diff --git a/Makefile.bigtable_v2 b/Makefile.bigtable_v2 index 05681b1d55ed..5e4f43bcb9bd 100644 --- a/Makefile.bigtable_v2 +++ b/Makefile.bigtable_v2 @@ -1,6 +1,6 @@ GRPCIO_VIRTUALENV=$(shell pwd)/grpc_python_venv GENERATED_DIR=$(shell pwd)/generated_python -GENERATED_SUBDIR=_generated_v2 +GENERATED_SUBDIR=_generated BIGTABLE_DIR=$(shell pwd)/gcloud/bigtable/$(GENERATED_SUBDIR) PROTOC_CMD=$(GRPCIO_VIRTUALENV)/bin/python -m grpc.tools.protoc GOOGLEAPIS_PROTOS_DIR=$(shell pwd)/googleapis-pb @@ -9,7 +9,6 @@ help: @echo 'Makefile for gcloud-python Bigtable protos ' @echo ' ' @echo ' make generate Generates the protobuf modules ' - @echo ' make check_generate Checks that generate succeeded ' @echo ' make clean Clean generated files ' generate: @@ -62,10 +61,7 @@ generate: # Rewrite the imports in the generated *_pb2.py files. python scripts/rewrite_imports.py $(BIGTABLE_DIR)/*pb2.py -check_generate: - python scripts/check_generate.py - clean: rm -fr $(GRPCIO_VIRTUALENV) $(GOOGLEAPIS_PROTOS_DIR) $(GENERATED_DIR) -.PHONY: generate check_generate clean +.PHONY: generate clean diff --git a/Makefile.datastore b/Makefile.datastore index 73665ef5f542..60772791ada7 100644 --- a/Makefile.datastore +++ b/Makefile.datastore @@ -8,7 +8,6 @@ help: @echo 'Makefile for gcloud-python Bigtable protos ' @echo ' ' @echo ' make generate Generates the protobuf modules ' - @echo ' make check_generate Checks that generate succeeded ' @echo ' make clean Clean generated files ' generate: @@ -48,10 +47,7 @@ generate: # Rewrite the imports in the generated *_pb2.py files. python scripts/rewrite_imports.py $(DATASTORE_DIR)/*pb2.py -check_generate: - python scripts/check_generate.py - clean: rm -fr $(GENERATED_DIR) -.PHONY: generate check_generate clean +.PHONY: generate clean diff --git a/gcloud/bigtable/_generated_v2/_bigtable.proto b/gcloud/bigtable/_generated/_bigtable.proto similarity index 100% rename from gcloud/bigtable/_generated_v2/_bigtable.proto rename to gcloud/bigtable/_generated/_bigtable.proto diff --git a/gcloud/bigtable/_generated/_bigtable_cluster_data.proto b/gcloud/bigtable/_generated/_bigtable_cluster_data.proto deleted file mode 100644 index c0f8a93f2862..000000000000 --- a/gcloud/bigtable/_generated/_bigtable_cluster_data.proto +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright (c) 2015, Google Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.cluster.v1; - -import "google/api/annotations.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/timestamp.proto"; - -option java_multiple_files = true; -option java_outer_classname = "BigtableClusterDataProto"; -option java_package = "com.google.bigtable.admin.cluster.v1"; - - -// A physical location in which a particular project can allocate Cloud BigTable -// resources. -message Zone { - // Possible states of a zone. - enum Status { - // The state of the zone is unknown or unspecified. - UNKNOWN = 0; - - // The zone is in a good state. - OK = 1; - - // The zone is down for planned maintenance. - PLANNED_MAINTENANCE = 2; - - // The zone is down for emergency or unplanned maintenance. - EMERGENCY_MAINENANCE = 3; - } - - // A permanent unique identifier for the zone. - // Values are of the form projects/<project>/zones/[a-z][-a-z0-9]* - string name = 1; - - // The name of this zone as it appears in UIs. - string display_name = 2; - - // The current state of this zone. - Status status = 3; -} - -// An isolated set of Cloud BigTable resources on which tables can be hosted. -message Cluster { - // A permanent unique identifier for the cluster. For technical reasons, the - // zone in which the cluster resides is included here. - // Values are of the form - // projects/<project>/zones/<zone>/clusters/[a-z][-a-z0-9]* - string name = 1; - - // The operation currently running on the cluster, if any. - // This cannot be set directly, only through CreateCluster, UpdateCluster, - // or UndeleteCluster. Calls to these methods will be rejected if - // "current_operation" is already set. - google.longrunning.Operation current_operation = 3; - - // The descriptive name for this cluster as it appears in UIs. - // Must be unique per zone. - string display_name = 4; - - // The number of serve nodes allocated to this cluster. - int32 serve_nodes = 5; - - // What storage type to use for tables in this cluster. Only configurable at - // cluster creation time. If unspecified, STORAGE_SSD will be used. - StorageType default_storage_type = 8; -} - -enum StorageType { - // The storage type used is unspecified. - STORAGE_UNSPECIFIED = 0; - - // Data will be stored in SSD, providing low and consistent latencies. - STORAGE_SSD = 1; - - // Data will be stored in HDD, providing high and less predictable - // latencies. - STORAGE_HDD = 2; -} diff --git a/gcloud/bigtable/_generated/_bigtable_cluster_service.proto b/gcloud/bigtable/_generated/_bigtable_cluster_service.proto deleted file mode 100644 index e65bca4de740..000000000000 --- a/gcloud/bigtable/_generated/_bigtable_cluster_service.proto +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.cluster.v1; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto"; -import "google/bigtable/admin/cluster/v1/bigtable_cluster_service_messages.proto"; -import "google/longrunning/operations.proto"; -import "google/protobuf/empty.proto"; - -option java_multiple_files = true; -option java_outer_classname = "BigtableClusterServicesProto"; -option java_package = "com.google.bigtable.admin.cluster.v1"; - - -// Service for managing zonal Cloud Bigtable resources. -service BigtableClusterService { - // Lists the supported zones for the given project. - rpc ListZones(ListZonesRequest) returns (ListZonesResponse) { - option (google.api.http) = { get: "/v1/{name=projects/*}/zones" }; - } - - // Gets information about a particular cluster. - rpc GetCluster(GetClusterRequest) returns (Cluster) { - option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*}" }; - } - - // Lists all clusters in the given project, along with any zones for which - // cluster information could not be retrieved. - rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) { - option (google.api.http) = { get: "/v1/{name=projects/*}/aggregated/clusters" }; - } - - // Creates a cluster and begins preparing it to begin serving. The returned - // cluster embeds as its "current_operation" a long-running operation which - // can be used to track the progress of turning up the new cluster. - // Immediately upon completion of this request: - // * The cluster will be readable via the API, with all requested attributes - // but no allocated resources. - // Until completion of the embedded operation: - // * Cancelling the operation will render the cluster immediately unreadable - // via the API. - // * All other attempts to modify or delete the cluster will be rejected. - // Upon completion of the embedded operation: - // * Billing for all successfully-allocated resources will begin (some types - // may have lower than the requested levels). - // * New tables can be created in the cluster. - // * The cluster's allocated resource levels will be readable via the API. - // The embedded operation's "metadata" field type is - // [CreateClusterMetadata][google.bigtable.admin.cluster.v1.CreateClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. - rpc CreateCluster(CreateClusterRequest) returns (Cluster) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*}/clusters" body: "*" }; - } - - // Updates a cluster, and begins allocating or releasing resources as - // requested. The returned cluster embeds as its "current_operation" a - // long-running operation which can be used to track the progress of updating - // the cluster. - // Immediately upon completion of this request: - // * For resource types where a decrease in the cluster's allocation has been - // requested, billing will be based on the newly-requested level. 
- // Until completion of the embedded operation: - // * Cancelling the operation will set its metadata's "cancelled_at_time", - // and begin restoring resources to their pre-request values. The operation - // is guaranteed to succeed at undoing all resource changes, after which - // point it will terminate with a CANCELLED status. - // * All other attempts to modify or delete the cluster will be rejected. - // * Reading the cluster via the API will continue to give the pre-request - // resource levels. - // Upon completion of the embedded operation: - // * Billing will begin for all successfully-allocated resources (some types - // may have lower than the requested levels). - // * All newly-reserved resources will be available for serving the cluster's - // tables. - // * The cluster's new resource levels will be readable via the API. - // [UpdateClusterMetadata][google.bigtable.admin.cluster.v1.UpdateClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. - rpc UpdateCluster(Cluster) returns (Cluster) { - option (google.api.http) = { put: "/v1/{name=projects/*/zones/*/clusters/*}" body: "*" }; - } - - // Marks a cluster and all of its tables for permanent deletion in 7 days. - // Immediately upon completion of the request: - // * Billing will cease for all of the cluster's reserved resources. - // * The cluster's "delete_time" field will be set 7 days in the future. - // Soon afterward: - // * All tables within the cluster will become unavailable. - // Prior to the cluster's "delete_time": - // * The cluster can be recovered with a call to UndeleteCluster. - // * All other attempts to modify or delete the cluster will be rejected. - // At the cluster's "delete_time": - // * The cluster and *all of its tables* will immediately and irrevocably - // disappear from the API, and their data will be permanently deleted. - rpc DeleteCluster(DeleteClusterRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*}" }; - } - - // Cancels the scheduled deletion of an cluster and begins preparing it to - // resume serving. The returned operation will also be embedded as the - // cluster's "current_operation". - // Immediately upon completion of this request: - // * The cluster's "delete_time" field will be unset, protecting it from - // automatic deletion. - // Until completion of the returned operation: - // * The operation cannot be cancelled. - // Upon completion of the returned operation: - // * Billing for the cluster's resources will resume. - // * All tables within the cluster will be available. - // [UndeleteClusterMetadata][google.bigtable.admin.cluster.v1.UndeleteClusterMetadata] The embedded operation's "response" field type is - // [Cluster][google.bigtable.admin.cluster.v1.Cluster], if successful. - rpc UndeleteCluster(UndeleteClusterRequest) returns (google.longrunning.Operation) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*}:undelete" body: "null" }; - } -} diff --git a/gcloud/bigtable/_generated/_bigtable_cluster_service_messages.proto b/gcloud/bigtable/_generated/_bigtable_cluster_service_messages.proto deleted file mode 100644 index 3291969375c5..000000000000 --- a/gcloud/bigtable/_generated/_bigtable_cluster_service_messages.proto +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright (c) 2015, Google Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.cluster.v1; - -import "google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto"; -import "google/protobuf/timestamp.proto"; - -option java_multiple_files = true; -option java_outer_classname = "BigtableClusterServiceMessagesProto"; -option java_package = "com.google.bigtable.admin.cluster.v1"; - - -// Request message for BigtableClusterService.ListZones. -message ListZonesRequest { - // The unique name of the project for which a list of supported zones is - // requested. - // Values are of the form projects/<project> - string name = 1; -} - -// Response message for BigtableClusterService.ListZones. -message ListZonesResponse { - // The list of requested zones. - repeated Zone zones = 1; -} - -// Request message for BigtableClusterService.GetCluster. -message GetClusterRequest { - // The unique name of the requested cluster. - // Values are of the form projects/<project>/zones/<zone>/clusters/<cluster> - string name = 1; -} - -// Request message for BigtableClusterService.ListClusters. -message ListClustersRequest { - // The unique name of the project for which a list of clusters is requested. - // Values are of the form projects/<project> - string name = 1; -} - -// Response message for BigtableClusterService.ListClusters. -message ListClustersResponse { - // The list of requested Clusters. - repeated Cluster clusters = 1; - - // The zones for which clusters could not be retrieved. - repeated Zone failed_zones = 2; -} - -// Request message for BigtableClusterService.CreateCluster. -message CreateClusterRequest { - // The unique name of the zone in which to create the cluster. - // Values are of the form projects/<project>/zones/<zone> - string name = 1; - - // The id to be used when referring to the new cluster within its zone, - // e.g. just the "test-cluster" section of the full name - // "projects/<project>/zones/<zone>/clusters/test-cluster". - string cluster_id = 2; - - // The cluster to create. - // The "name", "delete_time", and "current_operation" fields must be left - // blank. - Cluster cluster = 3; -} - -// Metadata type for the operation returned by -// BigtableClusterService.CreateCluster. -message CreateClusterMetadata { - // The request which prompted the creation of this operation. - CreateClusterRequest original_request = 1; - - // The time at which original_request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which this operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 3; -} - -// Metadata type for the operation returned by -// BigtableClusterService.UpdateCluster. -message UpdateClusterMetadata { - // The request which prompted the creation of this operation. - Cluster original_request = 1; - - // The time at which original_request was received. - google.protobuf.Timestamp request_time = 2; - - // The time at which this operation was cancelled.
If set, this operation is - // in the process of undoing itself (which is guaranteed to succeed) and - // cannot be cancelled again. - google.protobuf.Timestamp cancel_time = 3; - - // The time at which this operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 4; -} - -// Request message for BigtableClusterService.DeleteCluster. -message DeleteClusterRequest { - // The unique name of the cluster to be deleted. - // Values are of the form projects/<project>/zones/<zone>/clusters/<cluster> - string name = 1; -} - -// Request message for BigtableClusterService.UndeleteCluster. -message UndeleteClusterRequest { - // The unique name of the cluster to be un-deleted. - // Values are of the form projects/<project>/zones/<zone>/clusters/<cluster> - string name = 1; -} - -// Metadata type for the operation returned by -// BigtableClusterService.UndeleteCluster. -message UndeleteClusterMetadata { - // The time at which the original request was received. - google.protobuf.Timestamp request_time = 1; - - // The time at which this operation failed or was completed successfully. - google.protobuf.Timestamp finish_time = 2; -} diff --git a/gcloud/bigtable/_generated/_bigtable_data.proto b/gcloud/bigtable/_generated/_bigtable_data.proto deleted file mode 100644 index 290eb9116ad0..000000000000 --- a/gcloud/bigtable/_generated/_bigtable_data.proto +++ /dev/null @@ -1,515 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.v1; - -option java_multiple_files = true; -option java_outer_classname = "BigtableDataProto"; -option java_package = "com.google.bigtable.v1"; - - -// Specifies the complete (requested) contents of a single row of a table. -// Rows which exceed 256MiB in size cannot be read in full. -message Row { - // The unique key which identifies this row within its table. This is the same - // key that's used to identify the row in, for example, a MutateRowRequest. - // May contain any non-empty byte string up to 4KiB in length. - bytes key = 1; - - // May be empty, but only if the entire row is empty. - // The mutual ordering of column families is not specified. - repeated Family families = 2; -} - -// Specifies (some of) the contents of a single row/column family of a table. -message Family { - // The unique key which identifies this family within its row. This is the - // same key that's used to identify the family in, for example, a RowFilter - // which sets its "family_name_regex_filter" field. - // Must match [-_.a-zA-Z0-9]+, except that AggregatingRowProcessors may - // produce cells in a sentinel family with an empty name. - // Must be no greater than 64 characters in length. - string name = 1; - - // Must not be empty. Sorted in order of increasing "qualifier". - repeated Column columns = 2; -} - -// Specifies (some of) the contents of a single row/column of a table. -message Column { - // The unique key which identifies this column within its family.
This is the - // same key that's used to identify the column in, for example, a RowFilter - // which sets its "column_qualifier_regex_filter" field. - // May contain any byte string, including the empty string, up to 16kiB in - // length. - bytes qualifier = 1; - - // Must not be empty. Sorted in order of decreasing "timestamp_micros". - repeated Cell cells = 2; -} - -// Specifies (some of) the contents of a single row/column/timestamp of a table. -message Cell { - // The cell's stored timestamp, which also uniquely identifies it within - // its column. - // Values are always expressed in microseconds, but individual tables may set - // a coarser "granularity" to further restrict the allowed values. For - // example, a table which specifies millisecond granularity will only allow - // values of "timestamp_micros" which are multiples of 1000. - int64 timestamp_micros = 1; - - // The value stored in the cell. - // May contain any byte string, including the empty string, up to 100MiB in - // length. - bytes value = 2; - - // Labels applied to the cell by a [RowFilter][google.bigtable.v1.RowFilter]. - repeated string labels = 3; -} - -// Specifies a contiguous range of rows. -message RowRange { - // Inclusive lower bound. If left empty, interpreted as the empty string. - bytes start_key = 2; - - // Exclusive upper bound. If left empty, interpreted as infinity. - bytes end_key = 3; -} - -// Specifies a non-contiguous set of rows. -message RowSet { - // Single rows included in the set. - repeated bytes row_keys = 1; - - // Contiguous row ranges included in the set. - repeated RowRange row_ranges = 2; -} - -// Specifies a contiguous range of columns within a single column family. -// The range spans from <column_family>:<start_qualifier> to -// <column_family>:<end_qualifier>, where both bounds can be either inclusive or -// exclusive. -message ColumnRange { - // The name of the column family within which this range falls. - string family_name = 1; - - // The column qualifier at which to start the range (within 'column_family'). - // If neither field is set, interpreted as the empty string, inclusive. - oneof start_qualifier { - // Used when giving an inclusive lower bound for the range. - bytes start_qualifier_inclusive = 2; - - // Used when giving an exclusive lower bound for the range. - bytes start_qualifier_exclusive = 3; - } - - // The column qualifier at which to end the range (within 'column_family'). - // If neither field is set, interpreted as the infinite string, exclusive. - oneof end_qualifier { - // Used when giving an inclusive upper bound for the range. - bytes end_qualifier_inclusive = 4; - - // Used when giving an exclusive upper bound for the range. - bytes end_qualifier_exclusive = 5; - } -} - -// Specified a contiguous range of microsecond timestamps. -message TimestampRange { - // Inclusive lower bound. If left empty, interpreted as 0. - int64 start_timestamp_micros = 1; - - // Exclusive upper bound. If left empty, interpreted as infinity. - int64 end_timestamp_micros = 2; -} - -// Specifies a contiguous range of raw byte values. -message ValueRange { - // The value at which to start the range. - // If neither field is set, interpreted as the empty string, inclusive. - oneof start_value { - // Used when giving an inclusive lower bound for the range. - bytes start_value_inclusive = 1; - - // Used when giving an exclusive lower bound for the range. - bytes start_value_exclusive = 2; - } - - // The value at which to end the range. - // If neither field is set, interpreted as the infinite string, exclusive.
- oneof end_value { - // Used when giving an inclusive upper bound for the range. - bytes end_value_inclusive = 3; - - // Used when giving an exclusive upper bound for the range. - bytes end_value_exclusive = 4; - } -} - -// Takes a row as input and produces an alternate view of the row based on -// specified rules. For example, a RowFilter might trim down a row to include -// just the cells from columns matching a given regular expression, or might -// return all the cells of a row but not their values. More complicated filters -// can be composed out of these components to express requests such as, "within -// every column of a particular family, give just the two most recent cells -// which are older than timestamp X." -// -// There are two broad categories of RowFilters (true filters and transformers), -// as well as two ways to compose simple filters into more complex ones -// (chains and interleaves). They work as follows: -// -// * True filters alter the input row by excluding some of its cells wholesale -// from the output row. An example of a true filter is the "value_regex_filter", -// which excludes cells whose values don't match the specified pattern. All -// regex true filters use RE2 syntax (https://github.com/google/re2/wiki/Syntax) -// in raw byte mode (RE2::Latin1), and are evaluated as full matches. An -// important point to keep in mind is that RE2(.) is equivalent by default to -// RE2([^\n]), meaning that it does not match newlines. When attempting to match -// an arbitrary byte, you should therefore use the escape sequence '\C', which -// may need to be further escaped as '\\C' in your client language. -// -// * Transformers alter the input row by changing the values of some of its -// cells in the output, without excluding them completely. Currently, the only -// supported transformer is the "strip_value_transformer", which replaces every -// cell's value with the empty string. -// -// * Chains and interleaves are described in more detail in the -// RowFilter.Chain and RowFilter.Interleave documentation. -// -// The total serialized size of a RowFilter message must not -// exceed 4096 bytes, and RowFilters may not be nested within each other -// (in Chains or Interleaves) to a depth of more than 20. -message RowFilter { - // A RowFilter which sends rows through several RowFilters in sequence. - message Chain { - // The elements of "filters" are chained together to process the input row: - // in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row - // The full chain is executed atomically. - repeated RowFilter filters = 1; - } - - // A RowFilter which sends each row to each of several component - // RowFilters and interleaves the results. - message Interleave { - // The elements of "filters" all process a copy of the input row, and the - // results are pooled, sorted, and combined into a single output row. - // If multiple cells are produced with the same column and timestamp, - // they will all appear in the output row in an unspecified mutual order. 
- // Consider the following example, with three filters: - // - // input row - // | - // ----------------------------------------------------- - // | | | - // f(0) f(1) f(2) - // | | | - // 1: foo,bar,10,x foo,bar,10,z far,bar,7,a - // 2: foo,blah,11,z far,blah,5,x far,blah,5,x - // | | | - // ----------------------------------------------------- - // | - // 1: foo,bar,10,z // could have switched with #2 - // 2: foo,bar,10,x // could have switched with #1 - // 3: foo,blah,11,z - // 4: far,bar,7,a - // 5: far,blah,5,x // identical to #6 - // 6: far,blah,5,x // identical to #5 - // All interleaved filters are executed atomically. - repeated RowFilter filters = 1; - } - - // A RowFilter which evaluates one of two possible RowFilters, depending on - // whether or not a predicate RowFilter outputs any cells from the input row. - // - // IMPORTANT NOTE: The predicate filter does not execute atomically with the - // true and false filters, which may lead to inconsistent or unexpected - // results. Additionally, Condition filters have poor performance, especially - // when filters are set for the false condition. - message Condition { - // If "predicate_filter" outputs any cells, then "true_filter" will be - // evaluated on the input row. Otherwise, "false_filter" will be evaluated. - RowFilter predicate_filter = 1; - - // The filter to apply to the input row if "predicate_filter" returns any - // results. If not provided, no results will be returned in the true case. - RowFilter true_filter = 2; - - // The filter to apply to the input row if "predicate_filter" does not - // return any results. If not provided, no results will be returned in the - // false case. - RowFilter false_filter = 3; - } - - // Which of the possible RowFilter types to apply. If none are set, this - // RowFilter returns all cells in the input row. - oneof filter { - // Applies several RowFilters to the data in sequence, progressively - // narrowing the results. - Chain chain = 1; - - // Applies several RowFilters to the data in parallel and combines the - // results. - Interleave interleave = 2; - - // Applies one of two possible RowFilters to the data based on the output of - // a predicate RowFilter. - Condition condition = 3; - - // ADVANCED USE ONLY. - // Hook for introspection into the RowFilter. Outputs all cells directly to - // the output of the read rather than to any parent filter. Consider the - // following example: - // - // Chain( - // FamilyRegex("A"), - // Interleave( - // All(), - // Chain(Label("foo"), Sink()) - // ), - // QualifierRegex("B") - // ) - // - // A,A,1,w - // A,B,2,x - // B,B,4,z - // | - // FamilyRegex("A") - // | - // A,A,1,w - // A,B,2,x - // | - // +------------+-------------+ - // | | - // All() Label(foo) - // | | - // A,A,1,w A,A,1,w,labels:[foo] - // A,B,2,x A,B,2,x,labels:[foo] - // | | - // | Sink() --------------+ - // | | | - // +------------+ x------+ A,A,1,w,labels:[foo] - // | A,B,2,x,labels:[foo] - // A,A,1,w | - // A,B,2,x | - // | | - // QualifierRegex("B") | - // | | - // A,B,2,x | - // | | - // +--------------------------------+ - // | - // A,A,1,w,labels:[foo] - // A,B,2,x,labels:[foo] // could be switched - // A,B,2,x // could be switched - // - // Despite being excluded by the qualifier filter, a copy of every cell - // that reaches the sink is present in the final result. - // - // As with an [Interleave][google.bigtable.v1.RowFilter.Interleave], - // duplicate cells are possible, and appear in an unspecified mutual order. 
- // In this case we have a duplicate with column "A:B" and timestamp 2, - // because one copy passed through the all filter while the other was - // passed through the label and sink. Note that one copy has label "foo", - // while the other does not. - // - // Cannot be used within the `predicate_filter`, `true_filter`, or - // `false_filter` of a [Condition][google.bigtable.v1.RowFilter.Condition]. - bool sink = 16; - - // Matches all cells, regardless of input. Functionally equivalent to - // leaving `filter` unset, but included for completeness. - bool pass_all_filter = 17; - - // Does not match any cells, regardless of input. Useful for temporarily - // disabling just part of a filter. - bool block_all_filter = 18; - - // Matches only cells from rows whose keys satisfy the given RE2 regex. In - // other words, passes through the entire row when the key matches, and - // otherwise produces an empty row. - // Note that, since row keys can contain arbitrary bytes, the '\C' escape - // sequence must be used if a true wildcard is desired. The '.' character - // will not match the new line character '\n', which may be present in a - // binary key. - bytes row_key_regex_filter = 4; - - // Matches all cells from a row with probability p, and matches no cells - // from the row with probability 1-p. - double row_sample_filter = 14; - - // Matches only cells from columns whose families satisfy the given RE2 - // regex. For technical reasons, the regex must not contain the ':' - // character, even if it is not being used as a literal. - // Note that, since column families cannot contain the new line character - // '\n', it is sufficient to use '.' as a full wildcard when matching - // column family names. - string family_name_regex_filter = 5; - - // Matches only cells from columns whose qualifiers satisfy the given RE2 - // regex. - // Note that, since column qualifiers can contain arbitrary bytes, the '\C' - // escape sequence must be used if a true wildcard is desired. The '.' - // character will not match the new line character '\n', which may be - // present in a binary qualifier. - bytes column_qualifier_regex_filter = 6; - - // Matches only cells from columns within the given range. - ColumnRange column_range_filter = 7; - - // Matches only cells with timestamps within the given range. - TimestampRange timestamp_range_filter = 8; - - // Matches only cells with values that satisfy the given regular expression. - // Note that, since cell values can contain arbitrary bytes, the '\C' escape - // sequence must be used if a true wildcard is desired. The '.' character - // will not match the new line character '\n', which may be present in a - // binary value. - bytes value_regex_filter = 9; - - // Matches only cells with values that fall within the given range. - ValueRange value_range_filter = 15; - - // Skips the first N cells of each row, matching all subsequent cells. - // If duplicate cells are present, as is possible when using an Interleave, - // each copy of the cell is counted separately. - int32 cells_per_row_offset_filter = 10; - - // Matches only the first N cells of each row. - // If duplicate cells are present, as is possible when using an Interleave, - // each copy of the cell is counted separately. - int32 cells_per_row_limit_filter = 11; - - // Matches only the most recent N cells within each column. 
For example, - // if N=2, this filter would match column "foo:bar" at timestamps 10 and 9, - // skip all earlier cells in "foo:bar", and then begin matching again in - // column "foo:bar2". - // If duplicate cells are present, as is possible when using an Interleave, - // each copy of the cell is counted separately. - int32 cells_per_column_limit_filter = 12; - - // Replaces each cell's value with the empty string. - bool strip_value_transformer = 13; - - // Applies the given label to all cells in the output row. This allows - // the client to determine which results were produced from which part of - // the filter. - // - // Values must be at most 15 characters in length, and match the RE2 - // pattern [a-z0-9\\-]+ - // - // Due to a technical limitation, it is not currently possible to apply - // multiple labels to a cell. As a result, a Chain may have no more than - // one sub-filter which contains a apply_label_transformer. It is okay for - // an Interleave to contain multiple apply_label_transformers, as they will - // be applied to separate copies of the input. This may be relaxed in the - // future. - string apply_label_transformer = 19; - } -} - -// Specifies a particular change to be made to the contents of a row. -message Mutation { - // A Mutation which sets the value of the specified cell. - message SetCell { - // The name of the family into which new data should be written. - // Must match [-_.a-zA-Z0-9]+ - string family_name = 1; - - // The qualifier of the column into which new data should be written. - // Can be any byte string, including the empty string. - bytes column_qualifier = 2; - - // The timestamp of the cell into which new data should be written. - // Use -1 for current Bigtable server time. - // Otherwise, the client should set this value itself, noting that the - // default value is a timestamp of zero if the field is left unspecified. - // Values must match the "granularity" of the table (e.g. micros, millis). - int64 timestamp_micros = 3; - - // The value to be written into the specified cell. - bytes value = 4; - } - - // A Mutation which deletes cells from the specified column, optionally - // restricting the deletions to a given timestamp range. - message DeleteFromColumn { - // The name of the family from which cells should be deleted. - // Must match [-_.a-zA-Z0-9]+ - string family_name = 1; - - // The qualifier of the column from which cells should be deleted. - // Can be any byte string, including the empty string. - bytes column_qualifier = 2; - - // The range of timestamps within which cells should be deleted. - TimestampRange time_range = 3; - } - - // A Mutation which deletes all cells from the specified column family. - message DeleteFromFamily { - // The name of the family from which cells should be deleted. - // Must match [-_.a-zA-Z0-9]+ - string family_name = 1; - } - - // A Mutation which deletes all cells from the containing row. - message DeleteFromRow { - - } - - // Which of the possible Mutation types to apply. - oneof mutation { - // Set a cell's value. - SetCell set_cell = 1; - - // Deletes cells from a column. - DeleteFromColumn delete_from_column = 2; - - // Deletes cells from a column family. - DeleteFromFamily delete_from_family = 3; - - // Deletes cells from the entire row. - DeleteFromRow delete_from_row = 4; - } -} - -// Specifies an atomic read/modify/write operation on the latest value of the -// specified column. -message ReadModifyWriteRule { - // The name of the family to which the read/modify/write should be applied. 
- // Must match [-_.a-zA-Z0-9]+ - string family_name = 1; - - // The qualifier of the column to which the read/modify/write should be - // applied. - // Can be any byte string, including the empty string. - bytes column_qualifier = 2; - - // The rule used to determine the column's new latest value from its current - // latest value. - oneof rule { - // Rule specifying that "append_value" be appended to the existing value. - // If the targeted cell is unset, it will be treated as containing the - // empty string. - bytes append_value = 3; - - // Rule specifying that "increment_amount" be added to the existing value. - // If the targeted cell is unset, it will be treated as containing a zero. - // Otherwise, the targeted cell must contain an 8-byte value (interpreted - // as a 64-bit big-endian signed integer), or the entire request will fail. - int64 increment_amount = 4; - } -} diff --git a/gcloud/bigtable/_generated_v2/_bigtable_instance_admin.proto b/gcloud/bigtable/_generated/_bigtable_instance_admin.proto similarity index 100% rename from gcloud/bigtable/_generated_v2/_bigtable_instance_admin.proto rename to gcloud/bigtable/_generated/_bigtable_instance_admin.proto diff --git a/gcloud/bigtable/_generated/_bigtable_service.proto b/gcloud/bigtable/_generated/_bigtable_service.proto deleted file mode 100644 index f1a83d351b63..000000000000 --- a/gcloud/bigtable/_generated/_bigtable_service.proto +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.v1; - -import "google/api/annotations.proto"; -import "google/bigtable/v1/bigtable_data.proto"; -import "google/bigtable/v1/bigtable_service_messages.proto"; -import "google/protobuf/empty.proto"; - -option java_generic_services = true; -option java_multiple_files = true; -option java_outer_classname = "BigtableServicesProto"; -option java_package = "com.google.bigtable.v1"; - - -// Service for reading from and writing to existing Bigtables. -service BigtableService { - // Streams back the contents of all requested rows, optionally applying - // the same Reader filter to each. Depending on their size, rows may be - // broken up across multiple responses, but atomicity of each row will still - // be preserved. - rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) { - option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:read" body: "*" }; - } - - // Returns a sample of row keys in the table. The returned row keys will - // delimit contiguous sections of the table of approximately equal size, - // which can be used to break up the data for distributed tasks like - // mapreduces. - rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) { - option (google.api.http) = { get: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:sampleKeys" }; - } - - // Mutates a row atomically. 
Cells already present in the row are left - // unchanged unless explicitly changed by 'mutation'. - rpc MutateRow(MutateRowRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:mutate" body: "*" }; - } - - // Mutates multiple rows in a batch. Each individual row is mutated - // atomically as in MutateRow, but the entire batch is not executed - // atomically. - rpc MutateRows(MutateRowsRequest) returns (MutateRowsResponse) { - option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}:mutateRows" body: "*" }; - } - - // Mutates a row atomically based on the output of a predicate Reader filter. - rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) { - option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:checkAndMutate" body: "*" }; - } - - // Modifies a row atomically, reading the latest existing timestamp/value from - // the specified columns and writing a new value at - // max(existing timestamp, current server time) based on pre-defined - // read/modify/write rules. Returns the new contents of all modified cells. - rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (Row) { - option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:readModifyWrite" body: "*" }; - } -} diff --git a/gcloud/bigtable/_generated/_bigtable_service_messages.proto b/gcloud/bigtable/_generated/_bigtable_service_messages.proto deleted file mode 100644 index 1479fb65eebf..000000000000 --- a/gcloud/bigtable/_generated/_bigtable_service_messages.proto +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.v1; - -import "google/bigtable/v1/bigtable_data.proto"; -import "google/rpc/status.proto"; - -option java_multiple_files = true; -option java_outer_classname = "BigtableServiceMessagesProto"; -option java_package = "com.google.bigtable.v1"; - - -// Request message for BigtableServer.ReadRows. -message ReadRowsRequest { - // The unique name of the table from which to read. - string table_name = 1; - - // If neither row_key nor row_range is set, reads from all rows. - oneof target { - // The key of a single row from which to read. - bytes row_key = 2; - - // A range of rows from which to read. - RowRange row_range = 3; - - // A set of rows from which to read. Entries need not be in order, and will - // be deduplicated before reading. - // The total serialized size of the set must not exceed 1MB. - RowSet row_set = 8; - } - - // The filter to apply to the contents of the specified row(s). If unset, - // reads the entire table. - RowFilter filter = 5; - - // By default, rows are read sequentially, producing results which are - // guaranteed to arrive in increasing row order. 
Setting - // "allow_row_interleaving" to true allows multiple rows to be interleaved in - // the response stream, which increases throughput but breaks this guarantee, - // and may force the client to use more memory to buffer partially-received - // rows. Cannot be set to true when specifying "num_rows_limit". - bool allow_row_interleaving = 6; - - // The read will terminate after committing to N rows' worth of results. The - // default (zero) is to return all results. - // Note that "allow_row_interleaving" cannot be set to true when this is set. - int64 num_rows_limit = 7; -} - -// Response message for BigtableService.ReadRows. -message ReadRowsResponse { - // Specifies a piece of a row's contents returned as part of the read - // response stream. - message Chunk { - oneof chunk { - // A subset of the data from a particular row. As long as no "reset_row" - // is received in between, multiple "row_contents" from the same row are - // from the same atomic view of that row, and will be received in the - // expected family/column/timestamp order. - Family row_contents = 1; - - // Indicates that the client should drop all previous chunks for - // "row_key", as it will be re-read from the beginning. - bool reset_row = 2; - - // Indicates that the client can safely process all previous chunks for - // "row_key", as its data has been fully read. - bool commit_row = 3; - } - } - - // The key of the row for which we're receiving data. - // Results will be received in increasing row key order, unless - // "allow_row_interleaving" was specified in the request. - bytes row_key = 1; - - // One or more chunks of the row specified by "row_key". - repeated Chunk chunks = 2; -} - -// Request message for BigtableService.SampleRowKeys. -message SampleRowKeysRequest { - // The unique name of the table from which to sample row keys. - string table_name = 1; -} - -// Response message for BigtableService.SampleRowKeys. -message SampleRowKeysResponse { - // Sorted streamed sequence of sample row keys in the table. The table might - // have contents before the first row key in the list and after the last one, - // but a key containing the empty string indicates "end of table" and will be - // the last response given, if present. - // Note that row keys in this list may not have ever been written to or read - // from, and users should therefore not make any assumptions about the row key - // structure that are specific to their use case. - bytes row_key = 1; - - // Approximate total storage space used by all rows in the table which precede - // "row_key". Buffering the contents of all rows between two subsequent - // samples would require space roughly equal to the difference in their - // "offset_bytes" fields. - int64 offset_bytes = 2; -} - -// Request message for BigtableService.MutateRow. -message MutateRowRequest { - // The unique name of the table to which the mutation should be applied. - string table_name = 1; - - // The key of the row to which the mutation should be applied. - bytes row_key = 2; - - // Changes to be atomically applied to the specified row. Entries are applied - // in order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry and at most 100000. - repeated Mutation mutations = 3; -} - -// Request message for BigtableService.MutateRows. -message MutateRowsRequest { - message Entry { - // The key of the row to which the `mutations` should be applied. - bytes row_key = 1; - - // Changes to be atomically applied to the specified row. 
Mutations are - // applied in order, meaning that earlier mutations can be masked by - // later ones. - // At least one mutation must be specified. - repeated Mutation mutations = 2; - } - - // The unique name of the table to which the mutations should be applied. - string table_name = 1; - - // The row keys/mutations to be applied in bulk. - // Each entry is applied as an atomic mutation, but the entries may be - // applied in arbitrary order (even between entries for the same row). - // At least one entry must be specified, and in total the entries may - // contain at most 100000 mutations. - repeated Entry entries = 2; -} - -// Response message for BigtableService.MutateRows. -message MutateRowsResponse { - // The results for each Entry from the request, presented in the order - // in which the entries were originally given. - repeated google.rpc.Status statuses = 1; -} - -// Request message for BigtableService.CheckAndMutateRowRequest -message CheckAndMutateRowRequest { - // The unique name of the table to which the conditional mutation should be - // applied. - string table_name = 1; - - // The key of the row to which the conditional mutation should be applied. - bytes row_key = 2; - - // The filter to be applied to the contents of the specified row. Depending - // on whether or not any results are yielded, either "true_mutations" or - // "false_mutations" will be executed. If unset, checks that the row contains - // any values at all. - RowFilter predicate_filter = 6; - - // Changes to be atomically applied to the specified row if "predicate_filter" - // yields at least one cell when applied to "row_key". Entries are applied in - // order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry if "false_mutations" is empty, and at most - // 100000. - repeated Mutation true_mutations = 4; - - // Changes to be atomically applied to the specified row if "predicate_filter" - // does not yield any cells when applied to "row_key". Entries are applied in - // order, meaning that earlier mutations can be masked by later ones. - // Must contain at least one entry if "true_mutations" is empty, and at most - // 100000. - repeated Mutation false_mutations = 5; -} - -// Response message for BigtableService.CheckAndMutateRowRequest. -message CheckAndMutateRowResponse { - // Whether or not the request's "predicate_filter" yielded any results for - // the specified row. - bool predicate_matched = 1; -} - -// Request message for BigtableService.ReadModifyWriteRowRequest. -message ReadModifyWriteRowRequest { - // The unique name of the table to which the read/modify/write rules should be - // applied. - string table_name = 1; - - // The key of the row to which the read/modify/write rules should be applied. - bytes row_key = 2; - - // Rules specifying how the specified row's contents are to be transformed - // into writes. Entries are applied in order, meaning that earlier rules will - // affect the results of later ones. 
- repeated ReadModifyWriteRule rules = 3; -} diff --git a/gcloud/bigtable/_generated_v2/_bigtable_table_admin.proto b/gcloud/bigtable/_generated/_bigtable_table_admin.proto similarity index 100% rename from gcloud/bigtable/_generated_v2/_bigtable_table_admin.proto rename to gcloud/bigtable/_generated/_bigtable_table_admin.proto diff --git a/gcloud/bigtable/_generated/_bigtable_table_data.proto b/gcloud/bigtable/_generated/_bigtable_table_data.proto deleted file mode 100644 index f81c878f03b5..000000000000 --- a/gcloud/bigtable/_generated/_bigtable_table_data.proto +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.table.v1; - -import "google/longrunning/operations.proto"; -import "google/protobuf/duration.proto"; - -option java_multiple_files = true; -option java_outer_classname = "BigtableTableDataProto"; -option java_package = "com.google.bigtable.admin.table.v1"; - - -// A collection of user data indexed by row, column, and timestamp. -// Each table is served using the resources of its parent cluster. -message Table { - enum TimestampGranularity { - MILLIS = 0; - } - - // A unique identifier of the form - // <cluster_name>/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]* - string name = 1; - - // If this Table is in the process of being created, the Operation used to - // track its progress. As long as this operation is present, the Table will - // not accept any Table Admin or Read/Write requests. - google.longrunning.Operation current_operation = 2; - - // The column families configured for this table, mapped by column family id. - map<string, ColumnFamily> column_families = 3; - - // The granularity (e.g. MILLIS, MICROS) at which timestamps are stored in - // this table. Timestamps not matching the granularity will be rejected. - // Cannot be changed once the table is created. - TimestampGranularity granularity = 4; -} - -// A set of columns within a table which share a common configuration. -message ColumnFamily { - // A unique identifier of the form <table_name>/columnFamilies/[-_.a-zA-Z0-9]+ - // The last segment is the same as the "name" field in - // google.bigtable.v1.Family.
- string name = 1; - - // Garbage collection expression specified by the following grammar: - // GC = EXPR - // | "" ; - // EXPR = EXPR, "||", EXPR (* lowest precedence *) - // | EXPR, "&&", EXPR - // | "(", EXPR, ")" (* highest precedence *) - // | PROP ; - // PROP = "version() >", NUM32 - // | "age() >", NUM64, [ UNIT ] ; - // NUM32 = non-zero-digit { digit } ; (* # NUM32 <= 2^32 - 1 *) - // NUM64 = non-zero-digit { digit } ; (* # NUM64 <= 2^63 - 1 *) - // UNIT = "d" | "h" | "m" (* d=days, h=hours, m=minutes, else micros *) - // GC expressions can be up to 500 characters in length - // - // The different types of PROP are defined as follows: - // version() - cell index, counting from most recent and starting at 1 - // age() - age of the cell (current time minus cell timestamp) - // - // Example: "version() > 3 || (age() > 3d && version() > 1)" - // drop cells beyond the most recent three, and drop cells older than three - // days unless they're the most recent cell in the row/column - // - // Garbage collection executes opportunistically in the background, and so - // it's possible for reads to return a cell even if it matches the active GC - // expression for its family. - string gc_expression = 2; - - // Garbage collection rule specified as a protobuf. - // Supersedes `gc_expression`. - // Must serialize to at most 500 bytes. - // - // NOTE: Garbage collection executes opportunistically in the background, and - // so it's possible for reads to return a cell even if it matches the active - // GC expression for its family. - GcRule gc_rule = 3; -} - -// Rule for determining which cells to delete during garbage collection. -message GcRule { - // A GcRule which deletes cells matching all of the given rules. - message Intersection { - // Only delete cells which would be deleted by every element of `rules`. - repeated GcRule rules = 1; - } - - // A GcRule which deletes cells matching any of the given rules. - message Union { - // Delete cells which would be deleted by any element of `rules`. - repeated GcRule rules = 1; - } - - oneof rule { - // Delete all cells in a column except the most recent N. - int32 max_num_versions = 1; - - // Delete cells in a column older than the given age. - // Values must be at least one millisecond, and will be truncated to - // microsecond granularity. - google.protobuf.Duration max_age = 2; - - // Delete cells that would be deleted by every nested rule. - Intersection intersection = 3; - - // Delete cells that would be deleted by any nested rule. - Union union = 4; - } -} diff --git a/gcloud/bigtable/_generated/_bigtable_table_service.proto b/gcloud/bigtable/_generated/_bigtable_table_service.proto deleted file mode 100644 index 417409c4093b..000000000000 --- a/gcloud/bigtable/_generated/_bigtable_table_service.proto +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
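Illustration (not part of the patch): the deleted _bigtable_table_data.proto above defines the structured GcRule message alongside the string gc_expression grammar, and its comment gives the example expression "version() > 3 || (age() > 3d && version() > 1)". The sketch below builds the structured-GcRule equivalent of that example using the v1 generated classes this diff removes; the gcloud.bigtable._generated.bigtable_table_data_pb2 import path is assumed from the *_pb2 naming used elsewhere in the patch.

# Sketch, assuming the (removed) v1 module gcloud.bigtable._generated.bigtable_table_data_pb2:
# structured GcRule equivalent of "version() > 3 || (age() > 3d && version() > 1)".
from google.protobuf import duration_pb2
from gcloud.bigtable._generated import bigtable_table_data_pb2 as table_data_pb2

three_days = duration_pb2.Duration(seconds=3 * 24 * 60 * 60)

gc_rule = table_data_pb2.GcRule(
    union=table_data_pb2.GcRule.Union(rules=[
        # version() > 3: drop all cells beyond the most recent three.
        table_data_pb2.GcRule(max_num_versions=3),
        # age() > 3d && version() > 1: drop cells older than three days
        # unless they are the most recent cell in the row/column.
        table_data_pb2.GcRule(intersection=table_data_pb2.GcRule.Intersection(rules=[
            table_data_pb2.GcRule(max_age=three_days),
            table_data_pb2.GcRule(max_num_versions=1),
        ])),
    ]))

assert gc_rule.ByteSize() <= 500  # gc_rule "must serialize to at most 500 bytes"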
- -syntax = "proto3"; - -package google.bigtable.admin.table.v1; - -import "google/api/annotations.proto"; -import "google/bigtable/admin/table/v1/bigtable_table_data.proto"; -import "google/bigtable/admin/table/v1/bigtable_table_service_messages.proto"; -import "google/protobuf/empty.proto"; - -option java_multiple_files = true; -option java_outer_classname = "BigtableTableServicesProto"; -option java_package = "com.google.bigtable.admin.table.v1"; - - -// Service for creating, configuring, and deleting Cloud Bigtable tables. -// Provides access to the table schemas only, not the data stored within the tables. -service BigtableTableService { - // Creates a new table, to be served from a specified cluster. - // The table can be created with a full set of initial column families, - // specified in the request. - rpc CreateTable(CreateTableRequest) returns (Table) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*}/tables" body: "*" }; - } - - // Lists the names of all tables served from a specified cluster. - rpc ListTables(ListTablesRequest) returns (ListTablesResponse) { - option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*}/tables" }; - } - - // Gets the schema of the specified table, including its column families. - rpc GetTable(GetTableRequest) returns (Table) { - option (google.api.http) = { get: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" }; - } - - // Permanently deletes a specified table and all of its data. - rpc DeleteTable(DeleteTableRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}" }; - } - - // Changes the name of a specified table. - // Cannot be used to move tables between clusters, zones, or projects. - rpc RenameTable(RenameTableRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}:rename" body: "*" }; - } - - // Creates a new column family within a specified table. - rpc CreateColumnFamily(CreateColumnFamilyRequest) returns (ColumnFamily) { - option (google.api.http) = { post: "/v1/{name=projects/*/zones/*/clusters/*/tables/*}/columnFamilies" body: "*" }; - } - - // Changes the configuration of a specified column family. - rpc UpdateColumnFamily(ColumnFamily) returns (ColumnFamily) { - option (google.api.http) = { put: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" body: "*" }; - } - - // Permanently deletes a specified column family and all of its data. - rpc DeleteColumnFamily(DeleteColumnFamilyRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}" }; - } -} diff --git a/gcloud/bigtable/_generated/_bigtable_table_service_messages.proto b/gcloud/bigtable/_generated/_bigtable_table_service_messages.proto deleted file mode 100644 index 73f2a8cfbf2b..000000000000 --- a/gcloud/bigtable/_generated/_bigtable_table_service_messages.proto +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.bigtable.admin.table.v1; - -import "google/bigtable/admin/table/v1/bigtable_table_data.proto"; - -option java_multiple_files = true; -option java_outer_classname = "BigtableTableServiceMessagesProto"; -option java_package = "com.google.bigtable.admin.table.v1"; - - -message CreateTableRequest { - // The unique name of the cluster in which to create the new table. - string name = 1; - - // The name by which the new table should be referred to within the cluster, - // e.g. "foobar" rather than "/tables/foobar". - string table_id = 2; - - // The Table to create. The `name` field of the Table and all of its - // ColumnFamilies must be left blank, and will be populated in the response. - Table table = 3; - - // The optional list of row keys that will be used to initially split the - // table into several tablets (Tablets are similar to HBase regions). - // Given two split keys, "s1" and "s2", three tablets will be created, - // spanning the key ranges: [, s1), [s1, s2), [s2, ). - // - // Example: - // * Row keys := ["a", "apple", "custom", "customer_1", "customer_2", - // "other", "zz"] - // * initial_split_keys := ["apple", "customer_1", "customer_2", "other"] - // * Key assignment: - // - Tablet 1 [, apple) => {"a"}. - // - Tablet 2 [apple, customer_1) => {"apple", "custom"}. - // - Tablet 3 [customer_1, customer_2) => {"customer_1"}. - // - Tablet 4 [customer_2, other) => {"customer_2"}. - // - Tablet 5 [other, ) => {"other", "zz"}. - repeated string initial_split_keys = 4; -} - -message ListTablesRequest { - // The unique name of the cluster for which tables should be listed. - string name = 1; -} - -message ListTablesResponse { - // The tables present in the requested cluster. - // At present, only the names of the tables are populated. - repeated Table tables = 1; -} - -message GetTableRequest { - // The unique name of the requested table. - string name = 1; -} - -message DeleteTableRequest { - // The unique name of the table to be deleted. - string name = 1; -} - -message RenameTableRequest { - // The current unique name of the table. - string name = 1; - - // The new name by which the table should be referred to within its containing - // cluster, e.g. "foobar" rather than "/tables/foobar". - string new_id = 2; -} - -message CreateColumnFamilyRequest { - // The unique name of the table in which to create the new column family. - string name = 1; - - // The name by which the new column family should be referred to within the - // table, e.g. "foobar" rather than "/columnFamilies/foobar". - string column_family_id = 2; - - // The column family to create. The `name` field must be left blank. - ColumnFamily column_family = 3; -} - -message DeleteColumnFamilyRequest { - // The unique name of the column family to be deleted. 
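Illustration (not part of the patch): the CreateTableRequest comment above spells out how initial_split_keys partitions row keys into tablets. The plain-Python sketch below, which uses no Bigtable API at all, just reproduces that documented assignment so the worked example can be checked mechanically; keys are shown as str rather than bytes for readability.

# Sketch: reproduce the documented tablet assignment for initial_split_keys.
import bisect

def tablet_index(row_key, split_keys):
    """Return the 1-based tablet index whose key range [prev_split, next_split) holds row_key."""
    # Tablet 1 is [, split_keys[0]); tablet i+1 is [split_keys[i-1], split_keys[i]);
    # the last tablet is [split_keys[-1], ).
    return bisect.bisect_right(sorted(split_keys), row_key) + 1

split_keys = ["apple", "customer_1", "customer_2", "other"]
row_keys = ["a", "apple", "custom", "customer_1", "customer_2", "other", "zz"]

for key in row_keys:
    print(key, "-> tablet", tablet_index(key, split_keys))
# Matches the example above: "a" -> 1, "apple"/"custom" -> 2,
# "customer_1" -> 3, "customer_2" -> 4, "other"/"zz" -> 5.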
- string name = 1; -} diff --git a/gcloud/bigtable/_generated_v2/_common.proto b/gcloud/bigtable/_generated/_common.proto similarity index 100% rename from gcloud/bigtable/_generated_v2/_common.proto rename to gcloud/bigtable/_generated/_common.proto diff --git a/gcloud/bigtable/_generated_v2/_data.proto b/gcloud/bigtable/_generated/_data.proto similarity index 100% rename from gcloud/bigtable/_generated_v2/_data.proto rename to gcloud/bigtable/_generated/_data.proto diff --git a/gcloud/bigtable/_generated_v2/_instance.proto b/gcloud/bigtable/_generated/_instance.proto similarity index 100% rename from gcloud/bigtable/_generated_v2/_instance.proto rename to gcloud/bigtable/_generated/_instance.proto diff --git a/gcloud/bigtable/_generated_v2/_table.proto b/gcloud/bigtable/_generated/_table.proto similarity index 100% rename from gcloud/bigtable/_generated_v2/_table.proto rename to gcloud/bigtable/_generated/_table.proto diff --git a/gcloud/bigtable/_generated/bigtable_cluster_data_pb2.py b/gcloud/bigtable/_generated/bigtable_cluster_data_pb2.py deleted file mode 100644 index 4106aabd082d..000000000000 --- a/gcloud/bigtable/_generated/bigtable_cluster_data_pb2.py +++ /dev/null @@ -1,221 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto - -from google.protobuf.internal import enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2 -from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='google/bigtable/admin/cluster/v1/bigtable_cluster_data.proto', - package='google.bigtable.admin.cluster.v1', - syntax='proto3', - serialized_pb=b'\n""" - __metaclass__ = abc.ABCMeta - @abc.abstractmethod - def ListZones(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def GetCluster(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def ListClusters(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def CreateCluster(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def UpdateCluster(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def DeleteCluster(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def UndeleteCluster(self, request, context): - raise NotImplementedError() - -class BetaBigtableClusterServiceStub(object): - """The interface to which stubs will conform.""" - __metaclass__ = abc.ABCMeta - @abc.abstractmethod - def ListZones(self, request, timeout): - raise NotImplementedError() - ListZones.future = None - @abc.abstractmethod - def GetCluster(self, request, timeout): - raise NotImplementedError() - GetCluster.future = None - @abc.abstractmethod - def ListClusters(self, request, timeout): - raise NotImplementedError() - ListClusters.future = None - @abc.abstractmethod - def CreateCluster(self, request, timeout): - raise NotImplementedError() - CreateCluster.future = None - 
@abc.abstractmethod - def UpdateCluster(self, request, timeout): - raise NotImplementedError() - UpdateCluster.future = None - @abc.abstractmethod - def DeleteCluster(self, request, timeout): - raise NotImplementedError() - DeleteCluster.future = None - @abc.abstractmethod - def UndeleteCluster(self, request, timeout): - raise NotImplementedError() - UndeleteCluster.future = None - -def beta_create_BigtableClusterService_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_cluster_data_pb2 - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_cluster_data_pb2 - import gcloud.bigtable._generated.bigtable_cluster_data_pb2 - import gcloud.bigtable._generated.bigtable_cluster_data_pb2 - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import google.protobuf.empty_pb2 - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import google.longrunning.operations_pb2 - request_deserializers = { - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'CreateCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.CreateClusterRequest.FromString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'DeleteCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.DeleteClusterRequest.FromString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'GetCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.GetClusterRequest.FromString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListClusters'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListClustersRequest.FromString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListZones'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListZonesRequest.FromString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UndeleteCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.UndeleteClusterRequest.FromString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UpdateCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.FromString, - } - response_serializers = { - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'CreateCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.SerializeToString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'DeleteCluster'): google.protobuf.empty_pb2.Empty.SerializeToString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'GetCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.SerializeToString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListClusters'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListClustersResponse.SerializeToString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListZones'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListZonesResponse.SerializeToString, - 
('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UndeleteCluster'): google.longrunning.operations_pb2.Operation.SerializeToString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UpdateCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.SerializeToString, - } - method_implementations = { - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'CreateCluster'): face_utilities.unary_unary_inline(servicer.CreateCluster), - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'DeleteCluster'): face_utilities.unary_unary_inline(servicer.DeleteCluster), - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'GetCluster'): face_utilities.unary_unary_inline(servicer.GetCluster), - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListClusters'): face_utilities.unary_unary_inline(servicer.ListClusters), - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListZones'): face_utilities.unary_unary_inline(servicer.ListZones), - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UndeleteCluster'): face_utilities.unary_unary_inline(servicer.UndeleteCluster), - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UpdateCluster'): face_utilities.unary_unary_inline(servicer.UpdateCluster), - } - server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) - return beta_implementations.server(method_implementations, options=server_options) - -def beta_create_BigtableClusterService_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_cluster_data_pb2 - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_cluster_data_pb2 - import gcloud.bigtable._generated.bigtable_cluster_data_pb2 - import gcloud.bigtable._generated.bigtable_cluster_data_pb2 - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import google.protobuf.empty_pb2 - import gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2 - import google.longrunning.operations_pb2 - request_serializers = { - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'CreateCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.CreateClusterRequest.SerializeToString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'DeleteCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.DeleteClusterRequest.SerializeToString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'GetCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.GetClusterRequest.SerializeToString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListClusters'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListClustersRequest.SerializeToString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListZones'): 
gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListZonesRequest.SerializeToString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UndeleteCluster'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.UndeleteClusterRequest.SerializeToString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UpdateCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.SerializeToString, - } - response_deserializers = { - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'CreateCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.FromString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'DeleteCluster'): google.protobuf.empty_pb2.Empty.FromString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'GetCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.FromString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListClusters'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListClustersResponse.FromString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'ListZones'): gcloud.bigtable._generated.bigtable_cluster_service_messages_pb2.ListZonesResponse.FromString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UndeleteCluster'): google.longrunning.operations_pb2.Operation.FromString, - ('google.bigtable.admin.cluster.v1.BigtableClusterService', 'UpdateCluster'): gcloud.bigtable._generated.bigtable_cluster_data_pb2.Cluster.FromString, - } - cardinalities = { - 'CreateCluster': cardinality.Cardinality.UNARY_UNARY, - 'DeleteCluster': cardinality.Cardinality.UNARY_UNARY, - 'GetCluster': cardinality.Cardinality.UNARY_UNARY, - 'ListClusters': cardinality.Cardinality.UNARY_UNARY, - 'ListZones': cardinality.Cardinality.UNARY_UNARY, - 'UndeleteCluster': cardinality.Cardinality.UNARY_UNARY, - 'UpdateCluster': cardinality.Cardinality.UNARY_UNARY, - } - stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) - return beta_implementations.dynamic_stub(channel, 'google.bigtable.admin.cluster.v1.BigtableClusterService', cardinalities, options=stub_options) -# @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/_generated/bigtable_data_pb2.py b/gcloud/bigtable/_generated/bigtable_data_pb2.py deleted file mode 100644 index 47eb8756c7d9..000000000000 --- a/gcloud/bigtable/_generated/bigtable_data_pb2.py +++ /dev/null @@ -1,1226 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
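Illustration (not part of the patch): the deleted module above also exposed beta_create_BigtableClusterService_stub, whose generated methods take a (request, timeout) pair. The minimal sketch below shows how that removed v1 beta surface was typically wired up with grpc.beta.implementations; the module paths are inferred from the imports in the deleted code, the endpoint is a placeholder, and request fields are omitted.

# Minimal sketch of driving the removed v1 beta stub (placeholder endpoint, no auth).
from grpc.beta import implementations

from gcloud.bigtable._generated import bigtable_cluster_service_pb2
from gcloud.bigtable._generated import bigtable_cluster_service_messages_pb2

channel = implementations.insecure_channel('localhost', 9999)  # placeholder host/port
stub = bigtable_cluster_service_pb2.beta_create_BigtableClusterService_stub(channel)

request = bigtable_cluster_service_messages_pb2.ListZonesRequest()  # fields omitted
response = stub.ListZones(request, 10)  # beta-generated methods take (request, timeout)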
-# source: google/bigtable/v1/bigtable_data.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='google/bigtable/v1/bigtable_data.proto', - package='google.bigtable.v1', - syntax='proto3', - serialized_pb=b'\n&google/bigtable/v1/bigtable_data.proto\x12\x12google.bigtable.v1\"@\n\x03Row\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12,\n\x08\x66\x61milies\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v1.Family\"C\n\x06\x46\x61mily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v1.Column\"D\n\x06\x43olumn\x12\x11\n\tqualifier\x18\x01 \x01(\x0c\x12\'\n\x05\x63\x65lls\x18\x02 \x03(\x0b\x32\x18.google.bigtable.v1.Cell\"?\n\x04\x43\x65ll\x12\x18\n\x10timestamp_micros\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x0e\n\x06labels\x18\x03 \x03(\t\".\n\x08RowRange\x12\x11\n\tstart_key\x18\x02 \x01(\x0c\x12\x0f\n\x07\x65nd_key\x18\x03 \x01(\x0c\"L\n\x06RowSet\x12\x10\n\x08row_keys\x18\x01 \x03(\x0c\x12\x30\n\nrow_ranges\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v1.RowRange\"\xd6\x01\n\x0b\x43olumnRange\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12#\n\x19start_qualifier_inclusive\x18\x02 \x01(\x0cH\x00\x12#\n\x19start_qualifier_exclusive\x18\x03 \x01(\x0cH\x00\x12!\n\x17\x65nd_qualifier_inclusive\x18\x04 \x01(\x0cH\x01\x12!\n\x17\x65nd_qualifier_exclusive\x18\x05 \x01(\x0cH\x01\x42\x11\n\x0fstart_qualifierB\x0f\n\rend_qualifier\"N\n\x0eTimestampRange\x12\x1e\n\x16start_timestamp_micros\x18\x01 \x01(\x03\x12\x1c\n\x14\x65nd_timestamp_micros\x18\x02 \x01(\x03\"\xa8\x01\n\nValueRange\x12\x1f\n\x15start_value_inclusive\x18\x01 \x01(\x0cH\x00\x12\x1f\n\x15start_value_exclusive\x18\x02 \x01(\x0cH\x00\x12\x1d\n\x13\x65nd_value_inclusive\x18\x03 \x01(\x0cH\x01\x12\x1d\n\x13\x65nd_value_exclusive\x18\x04 \x01(\x0cH\x01\x42\r\n\x0bstart_valueB\x0b\n\tend_value\"\xdf\x08\n\tRowFilter\x12\x34\n\x05\x63hain\x18\x01 \x01(\x0b\x32#.google.bigtable.v1.RowFilter.ChainH\x00\x12>\n\ninterleave\x18\x02 \x01(\x0b\x32(.google.bigtable.v1.RowFilter.InterleaveH\x00\x12<\n\tcondition\x18\x03 \x01(\x0b\x32\'.google.bigtable.v1.RowFilter.ConditionH\x00\x12\x0e\n\x04sink\x18\x10 \x01(\x08H\x00\x12\x19\n\x0fpass_all_filter\x18\x11 \x01(\x08H\x00\x12\x1a\n\x10\x62lock_all_filter\x18\x12 \x01(\x08H\x00\x12\x1e\n\x14row_key_regex_filter\x18\x04 \x01(\x0cH\x00\x12\x1b\n\x11row_sample_filter\x18\x0e \x01(\x01H\x00\x12\"\n\x18\x66\x61mily_name_regex_filter\x18\x05 \x01(\tH\x00\x12\'\n\x1d\x63olumn_qualifier_regex_filter\x18\x06 \x01(\x0cH\x00\x12>\n\x13\x63olumn_range_filter\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v1.ColumnRangeH\x00\x12\x44\n\x16timestamp_range_filter\x18\x08 \x01(\x0b\x32\".google.bigtable.v1.TimestampRangeH\x00\x12\x1c\n\x12value_regex_filter\x18\t \x01(\x0cH\x00\x12<\n\x12value_range_filter\x18\x0f \x01(\x0b\x32\x1e.google.bigtable.v1.ValueRangeH\x00\x12%\n\x1b\x63\x65lls_per_row_offset_filter\x18\n \x01(\x05H\x00\x12$\n\x1a\x63\x65lls_per_row_limit_filter\x18\x0b \x01(\x05H\x00\x12\'\n\x1d\x63\x65lls_per_column_limit_filter\x18\x0c \x01(\x05H\x00\x12!\n\x17strip_value_transformer\x18\r \x01(\x08H\x00\x12!\n\x17\x61pply_label_transformer\x18\x13 \x01(\tH\x00\x1a\x37\n\x05\x43hain\x12.\n\x07\x66ilters\x18\x01 
\x03(\x0b\x32\x1d.google.bigtable.v1.RowFilter\x1a<\n\nInterleave\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v1.RowFilter\x1a\xad\x01\n\tCondition\x12\x37\n\x10predicate_filter\x18\x01 \x01(\x0b\x32\x1d.google.bigtable.v1.RowFilter\x12\x32\n\x0btrue_filter\x18\x02 \x01(\x0b\x32\x1d.google.bigtable.v1.RowFilter\x12\x33\n\x0c\x66\x61lse_filter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v1.RowFilterB\x08\n\x06\x66ilter\"\xc9\x04\n\x08Mutation\x12\x38\n\x08set_cell\x18\x01 \x01(\x0b\x32$.google.bigtable.v1.Mutation.SetCellH\x00\x12K\n\x12\x64\x65lete_from_column\x18\x02 \x01(\x0b\x32-.google.bigtable.v1.Mutation.DeleteFromColumnH\x00\x12K\n\x12\x64\x65lete_from_family\x18\x03 \x01(\x0b\x32-.google.bigtable.v1.Mutation.DeleteFromFamilyH\x00\x12\x45\n\x0f\x64\x65lete_from_row\x18\x04 \x01(\x0b\x32*.google.bigtable.v1.Mutation.DeleteFromRowH\x00\x1a\x61\n\x07SetCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x18\n\x10timestamp_micros\x18\x03 \x01(\x03\x12\r\n\x05value\x18\x04 \x01(\x0c\x1ay\n\x10\x44\x65leteFromColumn\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x36\n\ntime_range\x18\x03 \x01(\x0b\x32\".google.bigtable.v1.TimestampRange\x1a\'\n\x10\x44\x65leteFromFamily\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x1a\x0f\n\rDeleteFromRowB\n\n\x08mutation\"\x80\x01\n\x13ReadModifyWriteRule\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x16\n\x0c\x61ppend_value\x18\x03 \x01(\x0cH\x00\x12\x1a\n\x10increment_amount\x18\x04 \x01(\x03H\x00\x42\x06\n\x04ruleB-\n\x16\x63om.google.bigtable.v1B\x11\x42igtableDataProtoP\x01\x62\x06proto3' -) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - - -_ROW = _descriptor.Descriptor( - name='Row', - full_name='google.bigtable.v1.Row', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='google.bigtable.v1.Row.key', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='families', full_name='google.bigtable.v1.Row.families', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=62, - serialized_end=126, -) - - -_FAMILY = _descriptor.Descriptor( - name='Family', - full_name='google.bigtable.v1.Family', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.v1.Family.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='columns', full_name='google.bigtable.v1.Family.columns', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, 
extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=128, - serialized_end=195, -) - - -_COLUMN = _descriptor.Descriptor( - name='Column', - full_name='google.bigtable.v1.Column', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='qualifier', full_name='google.bigtable.v1.Column.qualifier', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='cells', full_name='google.bigtable.v1.Column.cells', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=197, - serialized_end=265, -) - - -_CELL = _descriptor.Descriptor( - name='Cell', - full_name='google.bigtable.v1.Cell', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='timestamp_micros', full_name='google.bigtable.v1.Cell.timestamp_micros', index=0, - number=1, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value', full_name='google.bigtable.v1.Cell.value', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='labels', full_name='google.bigtable.v1.Cell.labels', index=2, - number=3, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=267, - serialized_end=330, -) - - -_ROWRANGE = _descriptor.Descriptor( - name='RowRange', - full_name='google.bigtable.v1.RowRange', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='start_key', full_name='google.bigtable.v1.RowRange.start_key', index=0, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='end_key', full_name='google.bigtable.v1.RowRange.end_key', index=1, - number=3, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - 
serialized_start=332, - serialized_end=378, -) - - -_ROWSET = _descriptor.Descriptor( - name='RowSet', - full_name='google.bigtable.v1.RowSet', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='row_keys', full_name='google.bigtable.v1.RowSet.row_keys', index=0, - number=1, type=12, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='row_ranges', full_name='google.bigtable.v1.RowSet.row_ranges', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=380, - serialized_end=456, -) - - -_COLUMNRANGE = _descriptor.Descriptor( - name='ColumnRange', - full_name='google.bigtable.v1.ColumnRange', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='family_name', full_name='google.bigtable.v1.ColumnRange.family_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='start_qualifier_inclusive', full_name='google.bigtable.v1.ColumnRange.start_qualifier_inclusive', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='start_qualifier_exclusive', full_name='google.bigtable.v1.ColumnRange.start_qualifier_exclusive', index=2, - number=3, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='end_qualifier_inclusive', full_name='google.bigtable.v1.ColumnRange.end_qualifier_inclusive', index=3, - number=4, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='end_qualifier_exclusive', full_name='google.bigtable.v1.ColumnRange.end_qualifier_exclusive', index=4, - number=5, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='start_qualifier', full_name='google.bigtable.v1.ColumnRange.start_qualifier', - index=0, containing_type=None, fields=[]), - _descriptor.OneofDescriptor( - name='end_qualifier', full_name='google.bigtable.v1.ColumnRange.end_qualifier', - index=1, containing_type=None, fields=[]), - ], - serialized_start=459, - serialized_end=673, -) - - -_TIMESTAMPRANGE = 
_descriptor.Descriptor( - name='TimestampRange', - full_name='google.bigtable.v1.TimestampRange', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='start_timestamp_micros', full_name='google.bigtable.v1.TimestampRange.start_timestamp_micros', index=0, - number=1, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='end_timestamp_micros', full_name='google.bigtable.v1.TimestampRange.end_timestamp_micros', index=1, - number=2, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=675, - serialized_end=753, -) - - -_VALUERANGE = _descriptor.Descriptor( - name='ValueRange', - full_name='google.bigtable.v1.ValueRange', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='start_value_inclusive', full_name='google.bigtable.v1.ValueRange.start_value_inclusive', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='start_value_exclusive', full_name='google.bigtable.v1.ValueRange.start_value_exclusive', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='end_value_inclusive', full_name='google.bigtable.v1.ValueRange.end_value_inclusive', index=2, - number=3, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='end_value_exclusive', full_name='google.bigtable.v1.ValueRange.end_value_exclusive', index=3, - number=4, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='start_value', full_name='google.bigtable.v1.ValueRange.start_value', - index=0, containing_type=None, fields=[]), - _descriptor.OneofDescriptor( - name='end_value', full_name='google.bigtable.v1.ValueRange.end_value', - index=1, containing_type=None, fields=[]), - ], - serialized_start=756, - serialized_end=924, -) - - -_ROWFILTER_CHAIN = _descriptor.Descriptor( - name='Chain', - full_name='google.bigtable.v1.RowFilter.Chain', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='filters', full_name='google.bigtable.v1.RowFilter.Chain.filters', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, 
enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1743, - serialized_end=1798, -) - -_ROWFILTER_INTERLEAVE = _descriptor.Descriptor( - name='Interleave', - full_name='google.bigtable.v1.RowFilter.Interleave', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='filters', full_name='google.bigtable.v1.RowFilter.Interleave.filters', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1800, - serialized_end=1860, -) - -_ROWFILTER_CONDITION = _descriptor.Descriptor( - name='Condition', - full_name='google.bigtable.v1.RowFilter.Condition', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='predicate_filter', full_name='google.bigtable.v1.RowFilter.Condition.predicate_filter', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='true_filter', full_name='google.bigtable.v1.RowFilter.Condition.true_filter', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='false_filter', full_name='google.bigtable.v1.RowFilter.Condition.false_filter', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1863, - serialized_end=2036, -) - -_ROWFILTER = _descriptor.Descriptor( - name='RowFilter', - full_name='google.bigtable.v1.RowFilter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='chain', full_name='google.bigtable.v1.RowFilter.chain', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='interleave', full_name='google.bigtable.v1.RowFilter.interleave', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='condition', full_name='google.bigtable.v1.RowFilter.condition', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - 
is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='sink', full_name='google.bigtable.v1.RowFilter.sink', index=3, - number=16, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='pass_all_filter', full_name='google.bigtable.v1.RowFilter.pass_all_filter', index=4, - number=17, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='block_all_filter', full_name='google.bigtable.v1.RowFilter.block_all_filter', index=5, - number=18, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='row_key_regex_filter', full_name='google.bigtable.v1.RowFilter.row_key_regex_filter', index=6, - number=4, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='row_sample_filter', full_name='google.bigtable.v1.RowFilter.row_sample_filter', index=7, - number=14, type=1, cpp_type=5, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='family_name_regex_filter', full_name='google.bigtable.v1.RowFilter.family_name_regex_filter', index=8, - number=5, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='column_qualifier_regex_filter', full_name='google.bigtable.v1.RowFilter.column_qualifier_regex_filter', index=9, - number=6, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='column_range_filter', full_name='google.bigtable.v1.RowFilter.column_range_filter', index=10, - number=7, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='timestamp_range_filter', full_name='google.bigtable.v1.RowFilter.timestamp_range_filter', index=11, - number=8, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value_regex_filter', full_name='google.bigtable.v1.RowFilter.value_regex_filter', index=12, - number=9, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value_range_filter', 
full_name='google.bigtable.v1.RowFilter.value_range_filter', index=13, - number=15, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='cells_per_row_offset_filter', full_name='google.bigtable.v1.RowFilter.cells_per_row_offset_filter', index=14, - number=10, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='cells_per_row_limit_filter', full_name='google.bigtable.v1.RowFilter.cells_per_row_limit_filter', index=15, - number=11, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='cells_per_column_limit_filter', full_name='google.bigtable.v1.RowFilter.cells_per_column_limit_filter', index=16, - number=12, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='strip_value_transformer', full_name='google.bigtable.v1.RowFilter.strip_value_transformer', index=17, - number=13, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='apply_label_transformer', full_name='google.bigtable.v1.RowFilter.apply_label_transformer', index=18, - number=19, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_ROWFILTER_CHAIN, _ROWFILTER_INTERLEAVE, _ROWFILTER_CONDITION, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='filter', full_name='google.bigtable.v1.RowFilter.filter', - index=0, containing_type=None, fields=[]), - ], - serialized_start=927, - serialized_end=2046, -) - - -_MUTATION_SETCELL = _descriptor.Descriptor( - name='SetCell', - full_name='google.bigtable.v1.Mutation.SetCell', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='family_name', full_name='google.bigtable.v1.Mutation.SetCell.family_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='column_qualifier', full_name='google.bigtable.v1.Mutation.SetCell.column_qualifier', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='timestamp_micros', full_name='google.bigtable.v1.Mutation.SetCell.timestamp_micros', index=2, - number=3, type=3, cpp_type=2, label=1, - 
has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value', full_name='google.bigtable.v1.Mutation.SetCell.value', index=3, - number=4, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2344, - serialized_end=2441, -) - -_MUTATION_DELETEFROMCOLUMN = _descriptor.Descriptor( - name='DeleteFromColumn', - full_name='google.bigtable.v1.Mutation.DeleteFromColumn', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='family_name', full_name='google.bigtable.v1.Mutation.DeleteFromColumn.family_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='column_qualifier', full_name='google.bigtable.v1.Mutation.DeleteFromColumn.column_qualifier', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='time_range', full_name='google.bigtable.v1.Mutation.DeleteFromColumn.time_range', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2443, - serialized_end=2564, -) - -_MUTATION_DELETEFROMFAMILY = _descriptor.Descriptor( - name='DeleteFromFamily', - full_name='google.bigtable.v1.Mutation.DeleteFromFamily', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='family_name', full_name='google.bigtable.v1.Mutation.DeleteFromFamily.family_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2566, - serialized_end=2605, -) - -_MUTATION_DELETEFROMROW = _descriptor.Descriptor( - name='DeleteFromRow', - full_name='google.bigtable.v1.Mutation.DeleteFromRow', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2607, - serialized_end=2622, -) - -_MUTATION = _descriptor.Descriptor( - name='Mutation', - full_name='google.bigtable.v1.Mutation', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - 
_descriptor.FieldDescriptor( - name='set_cell', full_name='google.bigtable.v1.Mutation.set_cell', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='delete_from_column', full_name='google.bigtable.v1.Mutation.delete_from_column', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='delete_from_family', full_name='google.bigtable.v1.Mutation.delete_from_family', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='delete_from_row', full_name='google.bigtable.v1.Mutation.delete_from_row', index=3, - number=4, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_MUTATION_SETCELL, _MUTATION_DELETEFROMCOLUMN, _MUTATION_DELETEFROMFAMILY, _MUTATION_DELETEFROMROW, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='mutation', full_name='google.bigtable.v1.Mutation.mutation', - index=0, containing_type=None, fields=[]), - ], - serialized_start=2049, - serialized_end=2634, -) - - -_READMODIFYWRITERULE = _descriptor.Descriptor( - name='ReadModifyWriteRule', - full_name='google.bigtable.v1.ReadModifyWriteRule', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='family_name', full_name='google.bigtable.v1.ReadModifyWriteRule.family_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='column_qualifier', full_name='google.bigtable.v1.ReadModifyWriteRule.column_qualifier', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='append_value', full_name='google.bigtable.v1.ReadModifyWriteRule.append_value', index=2, - number=3, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='increment_amount', full_name='google.bigtable.v1.ReadModifyWriteRule.increment_amount', index=3, - number=4, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - 
name='rule', full_name='google.bigtable.v1.ReadModifyWriteRule.rule', - index=0, containing_type=None, fields=[]), - ], - serialized_start=2637, - serialized_end=2765, -) - -_ROW.fields_by_name['families'].message_type = _FAMILY -_FAMILY.fields_by_name['columns'].message_type = _COLUMN -_COLUMN.fields_by_name['cells'].message_type = _CELL -_ROWSET.fields_by_name['row_ranges'].message_type = _ROWRANGE -_COLUMNRANGE.oneofs_by_name['start_qualifier'].fields.append( - _COLUMNRANGE.fields_by_name['start_qualifier_inclusive']) -_COLUMNRANGE.fields_by_name['start_qualifier_inclusive'].containing_oneof = _COLUMNRANGE.oneofs_by_name['start_qualifier'] -_COLUMNRANGE.oneofs_by_name['start_qualifier'].fields.append( - _COLUMNRANGE.fields_by_name['start_qualifier_exclusive']) -_COLUMNRANGE.fields_by_name['start_qualifier_exclusive'].containing_oneof = _COLUMNRANGE.oneofs_by_name['start_qualifier'] -_COLUMNRANGE.oneofs_by_name['end_qualifier'].fields.append( - _COLUMNRANGE.fields_by_name['end_qualifier_inclusive']) -_COLUMNRANGE.fields_by_name['end_qualifier_inclusive'].containing_oneof = _COLUMNRANGE.oneofs_by_name['end_qualifier'] -_COLUMNRANGE.oneofs_by_name['end_qualifier'].fields.append( - _COLUMNRANGE.fields_by_name['end_qualifier_exclusive']) -_COLUMNRANGE.fields_by_name['end_qualifier_exclusive'].containing_oneof = _COLUMNRANGE.oneofs_by_name['end_qualifier'] -_VALUERANGE.oneofs_by_name['start_value'].fields.append( - _VALUERANGE.fields_by_name['start_value_inclusive']) -_VALUERANGE.fields_by_name['start_value_inclusive'].containing_oneof = _VALUERANGE.oneofs_by_name['start_value'] -_VALUERANGE.oneofs_by_name['start_value'].fields.append( - _VALUERANGE.fields_by_name['start_value_exclusive']) -_VALUERANGE.fields_by_name['start_value_exclusive'].containing_oneof = _VALUERANGE.oneofs_by_name['start_value'] -_VALUERANGE.oneofs_by_name['end_value'].fields.append( - _VALUERANGE.fields_by_name['end_value_inclusive']) -_VALUERANGE.fields_by_name['end_value_inclusive'].containing_oneof = _VALUERANGE.oneofs_by_name['end_value'] -_VALUERANGE.oneofs_by_name['end_value'].fields.append( - _VALUERANGE.fields_by_name['end_value_exclusive']) -_VALUERANGE.fields_by_name['end_value_exclusive'].containing_oneof = _VALUERANGE.oneofs_by_name['end_value'] -_ROWFILTER_CHAIN.fields_by_name['filters'].message_type = _ROWFILTER -_ROWFILTER_CHAIN.containing_type = _ROWFILTER -_ROWFILTER_INTERLEAVE.fields_by_name['filters'].message_type = _ROWFILTER -_ROWFILTER_INTERLEAVE.containing_type = _ROWFILTER -_ROWFILTER_CONDITION.fields_by_name['predicate_filter'].message_type = _ROWFILTER -_ROWFILTER_CONDITION.fields_by_name['true_filter'].message_type = _ROWFILTER -_ROWFILTER_CONDITION.fields_by_name['false_filter'].message_type = _ROWFILTER -_ROWFILTER_CONDITION.containing_type = _ROWFILTER -_ROWFILTER.fields_by_name['chain'].message_type = _ROWFILTER_CHAIN -_ROWFILTER.fields_by_name['interleave'].message_type = _ROWFILTER_INTERLEAVE -_ROWFILTER.fields_by_name['condition'].message_type = _ROWFILTER_CONDITION -_ROWFILTER.fields_by_name['column_range_filter'].message_type = _COLUMNRANGE -_ROWFILTER.fields_by_name['timestamp_range_filter'].message_type = _TIMESTAMPRANGE -_ROWFILTER.fields_by_name['value_range_filter'].message_type = _VALUERANGE -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['chain']) -_ROWFILTER.fields_by_name['chain'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['interleave']) 
-_ROWFILTER.fields_by_name['interleave'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['condition']) -_ROWFILTER.fields_by_name['condition'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['sink']) -_ROWFILTER.fields_by_name['sink'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['pass_all_filter']) -_ROWFILTER.fields_by_name['pass_all_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['block_all_filter']) -_ROWFILTER.fields_by_name['block_all_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['row_key_regex_filter']) -_ROWFILTER.fields_by_name['row_key_regex_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['row_sample_filter']) -_ROWFILTER.fields_by_name['row_sample_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['family_name_regex_filter']) -_ROWFILTER.fields_by_name['family_name_regex_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['column_qualifier_regex_filter']) -_ROWFILTER.fields_by_name['column_qualifier_regex_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['column_range_filter']) -_ROWFILTER.fields_by_name['column_range_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['timestamp_range_filter']) -_ROWFILTER.fields_by_name['timestamp_range_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['value_regex_filter']) -_ROWFILTER.fields_by_name['value_regex_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['value_range_filter']) -_ROWFILTER.fields_by_name['value_range_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['cells_per_row_offset_filter']) -_ROWFILTER.fields_by_name['cells_per_row_offset_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['cells_per_row_limit_filter']) -_ROWFILTER.fields_by_name['cells_per_row_limit_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['cells_per_column_limit_filter']) -_ROWFILTER.fields_by_name['cells_per_column_limit_filter'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - _ROWFILTER.fields_by_name['strip_value_transformer']) -_ROWFILTER.fields_by_name['strip_value_transformer'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_ROWFILTER.oneofs_by_name['filter'].fields.append( - 
_ROWFILTER.fields_by_name['apply_label_transformer']) -_ROWFILTER.fields_by_name['apply_label_transformer'].containing_oneof = _ROWFILTER.oneofs_by_name['filter'] -_MUTATION_SETCELL.containing_type = _MUTATION -_MUTATION_DELETEFROMCOLUMN.fields_by_name['time_range'].message_type = _TIMESTAMPRANGE -_MUTATION_DELETEFROMCOLUMN.containing_type = _MUTATION -_MUTATION_DELETEFROMFAMILY.containing_type = _MUTATION -_MUTATION_DELETEFROMROW.containing_type = _MUTATION -_MUTATION.fields_by_name['set_cell'].message_type = _MUTATION_SETCELL -_MUTATION.fields_by_name['delete_from_column'].message_type = _MUTATION_DELETEFROMCOLUMN -_MUTATION.fields_by_name['delete_from_family'].message_type = _MUTATION_DELETEFROMFAMILY -_MUTATION.fields_by_name['delete_from_row'].message_type = _MUTATION_DELETEFROMROW -_MUTATION.oneofs_by_name['mutation'].fields.append( - _MUTATION.fields_by_name['set_cell']) -_MUTATION.fields_by_name['set_cell'].containing_oneof = _MUTATION.oneofs_by_name['mutation'] -_MUTATION.oneofs_by_name['mutation'].fields.append( - _MUTATION.fields_by_name['delete_from_column']) -_MUTATION.fields_by_name['delete_from_column'].containing_oneof = _MUTATION.oneofs_by_name['mutation'] -_MUTATION.oneofs_by_name['mutation'].fields.append( - _MUTATION.fields_by_name['delete_from_family']) -_MUTATION.fields_by_name['delete_from_family'].containing_oneof = _MUTATION.oneofs_by_name['mutation'] -_MUTATION.oneofs_by_name['mutation'].fields.append( - _MUTATION.fields_by_name['delete_from_row']) -_MUTATION.fields_by_name['delete_from_row'].containing_oneof = _MUTATION.oneofs_by_name['mutation'] -_READMODIFYWRITERULE.oneofs_by_name['rule'].fields.append( - _READMODIFYWRITERULE.fields_by_name['append_value']) -_READMODIFYWRITERULE.fields_by_name['append_value'].containing_oneof = _READMODIFYWRITERULE.oneofs_by_name['rule'] -_READMODIFYWRITERULE.oneofs_by_name['rule'].fields.append( - _READMODIFYWRITERULE.fields_by_name['increment_amount']) -_READMODIFYWRITERULE.fields_by_name['increment_amount'].containing_oneof = _READMODIFYWRITERULE.oneofs_by_name['rule'] -DESCRIPTOR.message_types_by_name['Row'] = _ROW -DESCRIPTOR.message_types_by_name['Family'] = _FAMILY -DESCRIPTOR.message_types_by_name['Column'] = _COLUMN -DESCRIPTOR.message_types_by_name['Cell'] = _CELL -DESCRIPTOR.message_types_by_name['RowRange'] = _ROWRANGE -DESCRIPTOR.message_types_by_name['RowSet'] = _ROWSET -DESCRIPTOR.message_types_by_name['ColumnRange'] = _COLUMNRANGE -DESCRIPTOR.message_types_by_name['TimestampRange'] = _TIMESTAMPRANGE -DESCRIPTOR.message_types_by_name['ValueRange'] = _VALUERANGE -DESCRIPTOR.message_types_by_name['RowFilter'] = _ROWFILTER -DESCRIPTOR.message_types_by_name['Mutation'] = _MUTATION -DESCRIPTOR.message_types_by_name['ReadModifyWriteRule'] = _READMODIFYWRITERULE - -Row = _reflection.GeneratedProtocolMessageType('Row', (_message.Message,), dict( - DESCRIPTOR = _ROW, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.Row) - )) -_sym_db.RegisterMessage(Row) - -Family = _reflection.GeneratedProtocolMessageType('Family', (_message.Message,), dict( - DESCRIPTOR = _FAMILY, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.Family) - )) -_sym_db.RegisterMessage(Family) - -Column = _reflection.GeneratedProtocolMessageType('Column', (_message.Message,), dict( - DESCRIPTOR = _COLUMN, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.Column) - 
)) -_sym_db.RegisterMessage(Column) - -Cell = _reflection.GeneratedProtocolMessageType('Cell', (_message.Message,), dict( - DESCRIPTOR = _CELL, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.Cell) - )) -_sym_db.RegisterMessage(Cell) - -RowRange = _reflection.GeneratedProtocolMessageType('RowRange', (_message.Message,), dict( - DESCRIPTOR = _ROWRANGE, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.RowRange) - )) -_sym_db.RegisterMessage(RowRange) - -RowSet = _reflection.GeneratedProtocolMessageType('RowSet', (_message.Message,), dict( - DESCRIPTOR = _ROWSET, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.RowSet) - )) -_sym_db.RegisterMessage(RowSet) - -ColumnRange = _reflection.GeneratedProtocolMessageType('ColumnRange', (_message.Message,), dict( - DESCRIPTOR = _COLUMNRANGE, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.ColumnRange) - )) -_sym_db.RegisterMessage(ColumnRange) - -TimestampRange = _reflection.GeneratedProtocolMessageType('TimestampRange', (_message.Message,), dict( - DESCRIPTOR = _TIMESTAMPRANGE, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.TimestampRange) - )) -_sym_db.RegisterMessage(TimestampRange) - -ValueRange = _reflection.GeneratedProtocolMessageType('ValueRange', (_message.Message,), dict( - DESCRIPTOR = _VALUERANGE, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.ValueRange) - )) -_sym_db.RegisterMessage(ValueRange) - -RowFilter = _reflection.GeneratedProtocolMessageType('RowFilter', (_message.Message,), dict( - - Chain = _reflection.GeneratedProtocolMessageType('Chain', (_message.Message,), dict( - DESCRIPTOR = _ROWFILTER_CHAIN, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.RowFilter.Chain) - )) - , - - Interleave = _reflection.GeneratedProtocolMessageType('Interleave', (_message.Message,), dict( - DESCRIPTOR = _ROWFILTER_INTERLEAVE, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.RowFilter.Interleave) - )) - , - - Condition = _reflection.GeneratedProtocolMessageType('Condition', (_message.Message,), dict( - DESCRIPTOR = _ROWFILTER_CONDITION, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.RowFilter.Condition) - )) - , - DESCRIPTOR = _ROWFILTER, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.RowFilter) - )) -_sym_db.RegisterMessage(RowFilter) -_sym_db.RegisterMessage(RowFilter.Chain) -_sym_db.RegisterMessage(RowFilter.Interleave) -_sym_db.RegisterMessage(RowFilter.Condition) - -Mutation = _reflection.GeneratedProtocolMessageType('Mutation', (_message.Message,), dict( - - SetCell = _reflection.GeneratedProtocolMessageType('SetCell', (_message.Message,), dict( - DESCRIPTOR = _MUTATION_SETCELL, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.Mutation.SetCell) - )) - , - - DeleteFromColumn = _reflection.GeneratedProtocolMessageType('DeleteFromColumn', (_message.Message,), dict( - DESCRIPTOR = _MUTATION_DELETEFROMCOLUMN, - __module__ = 
'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.Mutation.DeleteFromColumn) - )) - , - - DeleteFromFamily = _reflection.GeneratedProtocolMessageType('DeleteFromFamily', (_message.Message,), dict( - DESCRIPTOR = _MUTATION_DELETEFROMFAMILY, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.Mutation.DeleteFromFamily) - )) - , - - DeleteFromRow = _reflection.GeneratedProtocolMessageType('DeleteFromRow', (_message.Message,), dict( - DESCRIPTOR = _MUTATION_DELETEFROMROW, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.Mutation.DeleteFromRow) - )) - , - DESCRIPTOR = _MUTATION, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.Mutation) - )) -_sym_db.RegisterMessage(Mutation) -_sym_db.RegisterMessage(Mutation.SetCell) -_sym_db.RegisterMessage(Mutation.DeleteFromColumn) -_sym_db.RegisterMessage(Mutation.DeleteFromFamily) -_sym_db.RegisterMessage(Mutation.DeleteFromRow) - -ReadModifyWriteRule = _reflection.GeneratedProtocolMessageType('ReadModifyWriteRule', (_message.Message,), dict( - DESCRIPTOR = _READMODIFYWRITERULE, - __module__ = 'google.bigtable.v1.bigtable_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.ReadModifyWriteRule) - )) -_sym_db.RegisterMessage(ReadModifyWriteRule) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\026com.google.bigtable.v1B\021BigtableDataProtoP\001') -# @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/_generated_v2/bigtable_instance_admin_pb2.py b/gcloud/bigtable/_generated/bigtable_instance_admin_pb2.py similarity index 99% rename from gcloud/bigtable/_generated_v2/bigtable_instance_admin_pb2.py rename to gcloud/bigtable/_generated/bigtable_instance_admin_pb2.py index 9da2364b7866..e4d1bf87bcc2 100644 --- a/gcloud/bigtable/_generated_v2/bigtable_instance_admin_pb2.py +++ b/gcloud/bigtable/_generated/bigtable_instance_admin_pb2.py @@ -14,7 +14,7 @@ from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from gcloud.bigtable._generated_v2 import instance_pb2 as google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2 +from gcloud.bigtable._generated import instance_pb2 as google_dot_bigtable_dot_admin_dot_v2_dot_instance__pb2 from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2 from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 diff --git a/gcloud/bigtable/_generated_v2/bigtable_pb2.py b/gcloud/bigtable/_generated/bigtable_pb2.py similarity index 99% rename from gcloud/bigtable/_generated_v2/bigtable_pb2.py rename to gcloud/bigtable/_generated/bigtable_pb2.py index 606b3c826942..3a6aaaf93ad9 100644 --- a/gcloud/bigtable/_generated_v2/bigtable_pb2.py +++ b/gcloud/bigtable/_generated/bigtable_pb2.py @@ -14,7 +14,7 @@ from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from gcloud.bigtable._generated_v2 import data_pb2 as google_dot_bigtable_dot_v2_dot_data__pb2 +from gcloud.bigtable._generated import data_pb2 as google_dot_bigtable_dot_v2_dot_data__pb2 from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 diff --git 
a/gcloud/bigtable/_generated/bigtable_service_messages_pb2.py b/gcloud/bigtable/_generated/bigtable_service_messages_pb2.py deleted file mode 100644 index 38a478aded74..000000000000 --- a/gcloud/bigtable/_generated/bigtable_service_messages_pb2.py +++ /dev/null @@ -1,678 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/bigtable/v1/bigtable_service_messages.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from gcloud.bigtable._generated import bigtable_data_pb2 as google_dot_bigtable_dot_v1_dot_bigtable__data__pb2 -from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='google/bigtable/v1/bigtable_service_messages.proto', - package='google.bigtable.v1', - syntax='proto3', - serialized_pb=b'\n2google/bigtable/v1/bigtable_service_messages.proto\x12\x12google.bigtable.v1\x1a&google/bigtable/v1/bigtable_data.proto\x1a\x17google/rpc/status.proto\"\x8b\x02\n\x0fReadRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x11\n\x07row_key\x18\x02 \x01(\x0cH\x00\x12\x31\n\trow_range\x18\x03 \x01(\x0b\x32\x1c.google.bigtable.v1.RowRangeH\x00\x12-\n\x07row_set\x18\x08 \x01(\x0b\x32\x1a.google.bigtable.v1.RowSetH\x00\x12-\n\x06\x66ilter\x18\x05 \x01(\x0b\x32\x1d.google.bigtable.v1.RowFilter\x12\x1e\n\x16\x61llow_row_interleaving\x18\x06 \x01(\x08\x12\x16\n\x0enum_rows_limit\x18\x07 \x01(\x03\x42\x08\n\x06target\"\xd0\x01\n\x10ReadRowsResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12:\n\x06\x63hunks\x18\x02 \x03(\x0b\x32*.google.bigtable.v1.ReadRowsResponse.Chunk\x1ao\n\x05\x43hunk\x12\x32\n\x0crow_contents\x18\x01 \x01(\x0b\x32\x1a.google.bigtable.v1.FamilyH\x00\x12\x13\n\treset_row\x18\x02 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\x03 \x01(\x08H\x00\x42\x07\n\x05\x63hunk\"*\n\x14SampleRowKeysRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03\"h\n\x10MutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12/\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v1.Mutation\"\xb0\x01\n\x11MutateRowsRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12<\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v1.MutateRowsRequest.Entry\x1aI\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12/\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v1.Mutation\":\n\x12MutateRowsResponse\x12$\n\x08statuses\x18\x01 \x03(\x0b\x32\x12.google.rpc.Status\"\xe5\x01\n\x18\x43heckAndMutateRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v1.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v1.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v1.Mutation\"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08\"x\n\x19ReadModifyWriteRowRequest\x12\x12\n\ntable_name\x18\x01 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\x0c\x12\x36\n\x05rules\x18\x03 
\x03(\x0b\x32\'.google.bigtable.v1.ReadModifyWriteRuleB8\n\x16\x63om.google.bigtable.v1B\x1c\x42igtableServiceMessagesProtoP\x01\x62\x06proto3' - , - dependencies=[google_dot_bigtable_dot_v1_dot_bigtable__data__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - - -_READROWSREQUEST = _descriptor.Descriptor( - name='ReadRowsRequest', - full_name='google.bigtable.v1.ReadRowsRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='table_name', full_name='google.bigtable.v1.ReadRowsRequest.table_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='row_key', full_name='google.bigtable.v1.ReadRowsRequest.row_key', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='row_range', full_name='google.bigtable.v1.ReadRowsRequest.row_range', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='row_set', full_name='google.bigtable.v1.ReadRowsRequest.row_set', index=3, - number=8, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='filter', full_name='google.bigtable.v1.ReadRowsRequest.filter', index=4, - number=5, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='allow_row_interleaving', full_name='google.bigtable.v1.ReadRowsRequest.allow_row_interleaving', index=5, - number=6, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='num_rows_limit', full_name='google.bigtable.v1.ReadRowsRequest.num_rows_limit', index=6, - number=7, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='target', full_name='google.bigtable.v1.ReadRowsRequest.target', - index=0, containing_type=None, fields=[]), - ], - serialized_start=140, - serialized_end=407, -) - - -_READROWSRESPONSE_CHUNK = _descriptor.Descriptor( - name='Chunk', - full_name='google.bigtable.v1.ReadRowsResponse.Chunk', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='row_contents', full_name='google.bigtable.v1.ReadRowsResponse.Chunk.row_contents', index=0, - number=1, type=11, 
cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='reset_row', full_name='google.bigtable.v1.ReadRowsResponse.Chunk.reset_row', index=1, - number=2, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='commit_row', full_name='google.bigtable.v1.ReadRowsResponse.Chunk.commit_row', index=2, - number=3, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - _descriptor.OneofDescriptor( - name='chunk', full_name='google.bigtable.v1.ReadRowsResponse.Chunk.chunk', - index=0, containing_type=None, fields=[]), - ], - serialized_start=507, - serialized_end=618, -) - -_READROWSRESPONSE = _descriptor.Descriptor( - name='ReadRowsResponse', - full_name='google.bigtable.v1.ReadRowsResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='row_key', full_name='google.bigtable.v1.ReadRowsResponse.row_key', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='chunks', full_name='google.bigtable.v1.ReadRowsResponse.chunks', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_READROWSRESPONSE_CHUNK, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=410, - serialized_end=618, -) - - -_SAMPLEROWKEYSREQUEST = _descriptor.Descriptor( - name='SampleRowKeysRequest', - full_name='google.bigtable.v1.SampleRowKeysRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='table_name', full_name='google.bigtable.v1.SampleRowKeysRequest.table_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=620, - serialized_end=662, -) - - -_SAMPLEROWKEYSRESPONSE = _descriptor.Descriptor( - name='SampleRowKeysResponse', - full_name='google.bigtable.v1.SampleRowKeysResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='row_key', full_name='google.bigtable.v1.SampleRowKeysResponse.row_key', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - 
is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='offset_bytes', full_name='google.bigtable.v1.SampleRowKeysResponse.offset_bytes', index=1, - number=2, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=664, - serialized_end=726, -) - - -_MUTATEROWREQUEST = _descriptor.Descriptor( - name='MutateRowRequest', - full_name='google.bigtable.v1.MutateRowRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='table_name', full_name='google.bigtable.v1.MutateRowRequest.table_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='row_key', full_name='google.bigtable.v1.MutateRowRequest.row_key', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mutations', full_name='google.bigtable.v1.MutateRowRequest.mutations', index=2, - number=3, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=728, - serialized_end=832, -) - - -_MUTATEROWSREQUEST_ENTRY = _descriptor.Descriptor( - name='Entry', - full_name='google.bigtable.v1.MutateRowsRequest.Entry', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='row_key', full_name='google.bigtable.v1.MutateRowsRequest.Entry.row_key', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mutations', full_name='google.bigtable.v1.MutateRowsRequest.Entry.mutations', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=938, - serialized_end=1011, -) - -_MUTATEROWSREQUEST = _descriptor.Descriptor( - name='MutateRowsRequest', - full_name='google.bigtable.v1.MutateRowsRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='table_name', full_name='google.bigtable.v1.MutateRowsRequest.table_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, 
containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='entries', full_name='google.bigtable.v1.MutateRowsRequest.entries', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_MUTATEROWSREQUEST_ENTRY, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=835, - serialized_end=1011, -) - - -_MUTATEROWSRESPONSE = _descriptor.Descriptor( - name='MutateRowsResponse', - full_name='google.bigtable.v1.MutateRowsResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='statuses', full_name='google.bigtable.v1.MutateRowsResponse.statuses', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1013, - serialized_end=1071, -) - - -_CHECKANDMUTATEROWREQUEST = _descriptor.Descriptor( - name='CheckAndMutateRowRequest', - full_name='google.bigtable.v1.CheckAndMutateRowRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='table_name', full_name='google.bigtable.v1.CheckAndMutateRowRequest.table_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='row_key', full_name='google.bigtable.v1.CheckAndMutateRowRequest.row_key', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='predicate_filter', full_name='google.bigtable.v1.CheckAndMutateRowRequest.predicate_filter', index=2, - number=6, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='true_mutations', full_name='google.bigtable.v1.CheckAndMutateRowRequest.true_mutations', index=3, - number=4, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='false_mutations', full_name='google.bigtable.v1.CheckAndMutateRowRequest.false_mutations', index=4, - number=5, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1074, - serialized_end=1303, -) - - 
-_CHECKANDMUTATEROWRESPONSE = _descriptor.Descriptor( - name='CheckAndMutateRowResponse', - full_name='google.bigtable.v1.CheckAndMutateRowResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='predicate_matched', full_name='google.bigtable.v1.CheckAndMutateRowResponse.predicate_matched', index=0, - number=1, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1305, - serialized_end=1359, -) - - -_READMODIFYWRITEROWREQUEST = _descriptor.Descriptor( - name='ReadModifyWriteRowRequest', - full_name='google.bigtable.v1.ReadModifyWriteRowRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='table_name', full_name='google.bigtable.v1.ReadModifyWriteRowRequest.table_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='row_key', full_name='google.bigtable.v1.ReadModifyWriteRowRequest.row_key', index=1, - number=2, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=b"", - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='rules', full_name='google.bigtable.v1.ReadModifyWriteRowRequest.rules', index=2, - number=3, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1361, - serialized_end=1481, -) - -_READROWSREQUEST.fields_by_name['row_range'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._ROWRANGE -_READROWSREQUEST.fields_by_name['row_set'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._ROWSET -_READROWSREQUEST.fields_by_name['filter'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._ROWFILTER -_READROWSREQUEST.oneofs_by_name['target'].fields.append( - _READROWSREQUEST.fields_by_name['row_key']) -_READROWSREQUEST.fields_by_name['row_key'].containing_oneof = _READROWSREQUEST.oneofs_by_name['target'] -_READROWSREQUEST.oneofs_by_name['target'].fields.append( - _READROWSREQUEST.fields_by_name['row_range']) -_READROWSREQUEST.fields_by_name['row_range'].containing_oneof = _READROWSREQUEST.oneofs_by_name['target'] -_READROWSREQUEST.oneofs_by_name['target'].fields.append( - _READROWSREQUEST.fields_by_name['row_set']) -_READROWSREQUEST.fields_by_name['row_set'].containing_oneof = _READROWSREQUEST.oneofs_by_name['target'] -_READROWSRESPONSE_CHUNK.fields_by_name['row_contents'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._FAMILY -_READROWSRESPONSE_CHUNK.containing_type = _READROWSRESPONSE -_READROWSRESPONSE_CHUNK.oneofs_by_name['chunk'].fields.append( - _READROWSRESPONSE_CHUNK.fields_by_name['row_contents']) 
-_READROWSRESPONSE_CHUNK.fields_by_name['row_contents'].containing_oneof = _READROWSRESPONSE_CHUNK.oneofs_by_name['chunk'] -_READROWSRESPONSE_CHUNK.oneofs_by_name['chunk'].fields.append( - _READROWSRESPONSE_CHUNK.fields_by_name['reset_row']) -_READROWSRESPONSE_CHUNK.fields_by_name['reset_row'].containing_oneof = _READROWSRESPONSE_CHUNK.oneofs_by_name['chunk'] -_READROWSRESPONSE_CHUNK.oneofs_by_name['chunk'].fields.append( - _READROWSRESPONSE_CHUNK.fields_by_name['commit_row']) -_READROWSRESPONSE_CHUNK.fields_by_name['commit_row'].containing_oneof = _READROWSRESPONSE_CHUNK.oneofs_by_name['chunk'] -_READROWSRESPONSE.fields_by_name['chunks'].message_type = _READROWSRESPONSE_CHUNK -_MUTATEROWREQUEST.fields_by_name['mutations'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._MUTATION -_MUTATEROWSREQUEST_ENTRY.fields_by_name['mutations'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._MUTATION -_MUTATEROWSREQUEST_ENTRY.containing_type = _MUTATEROWSREQUEST -_MUTATEROWSREQUEST.fields_by_name['entries'].message_type = _MUTATEROWSREQUEST_ENTRY -_MUTATEROWSRESPONSE.fields_by_name['statuses'].message_type = google_dot_rpc_dot_status__pb2._STATUS -_CHECKANDMUTATEROWREQUEST.fields_by_name['predicate_filter'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._ROWFILTER -_CHECKANDMUTATEROWREQUEST.fields_by_name['true_mutations'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._MUTATION -_CHECKANDMUTATEROWREQUEST.fields_by_name['false_mutations'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._MUTATION -_READMODIFYWRITEROWREQUEST.fields_by_name['rules'].message_type = google_dot_bigtable_dot_v1_dot_bigtable__data__pb2._READMODIFYWRITERULE -DESCRIPTOR.message_types_by_name['ReadRowsRequest'] = _READROWSREQUEST -DESCRIPTOR.message_types_by_name['ReadRowsResponse'] = _READROWSRESPONSE -DESCRIPTOR.message_types_by_name['SampleRowKeysRequest'] = _SAMPLEROWKEYSREQUEST -DESCRIPTOR.message_types_by_name['SampleRowKeysResponse'] = _SAMPLEROWKEYSRESPONSE -DESCRIPTOR.message_types_by_name['MutateRowRequest'] = _MUTATEROWREQUEST -DESCRIPTOR.message_types_by_name['MutateRowsRequest'] = _MUTATEROWSREQUEST -DESCRIPTOR.message_types_by_name['MutateRowsResponse'] = _MUTATEROWSRESPONSE -DESCRIPTOR.message_types_by_name['CheckAndMutateRowRequest'] = _CHECKANDMUTATEROWREQUEST -DESCRIPTOR.message_types_by_name['CheckAndMutateRowResponse'] = _CHECKANDMUTATEROWRESPONSE -DESCRIPTOR.message_types_by_name['ReadModifyWriteRowRequest'] = _READMODIFYWRITEROWREQUEST - -ReadRowsRequest = _reflection.GeneratedProtocolMessageType('ReadRowsRequest', (_message.Message,), dict( - DESCRIPTOR = _READROWSREQUEST, - __module__ = 'google.bigtable.v1.bigtable_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.ReadRowsRequest) - )) -_sym_db.RegisterMessage(ReadRowsRequest) - -ReadRowsResponse = _reflection.GeneratedProtocolMessageType('ReadRowsResponse', (_message.Message,), dict( - - Chunk = _reflection.GeneratedProtocolMessageType('Chunk', (_message.Message,), dict( - DESCRIPTOR = _READROWSRESPONSE_CHUNK, - __module__ = 'google.bigtable.v1.bigtable_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.ReadRowsResponse.Chunk) - )) - , - DESCRIPTOR = _READROWSRESPONSE, - __module__ = 'google.bigtable.v1.bigtable_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.ReadRowsResponse) - )) -_sym_db.RegisterMessage(ReadRowsResponse) 
-_sym_db.RegisterMessage(ReadRowsResponse.Chunk) - -SampleRowKeysRequest = _reflection.GeneratedProtocolMessageType('SampleRowKeysRequest', (_message.Message,), dict( - DESCRIPTOR = _SAMPLEROWKEYSREQUEST, - __module__ = 'google.bigtable.v1.bigtable_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.SampleRowKeysRequest) - )) -_sym_db.RegisterMessage(SampleRowKeysRequest) - -SampleRowKeysResponse = _reflection.GeneratedProtocolMessageType('SampleRowKeysResponse', (_message.Message,), dict( - DESCRIPTOR = _SAMPLEROWKEYSRESPONSE, - __module__ = 'google.bigtable.v1.bigtable_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.SampleRowKeysResponse) - )) -_sym_db.RegisterMessage(SampleRowKeysResponse) - -MutateRowRequest = _reflection.GeneratedProtocolMessageType('MutateRowRequest', (_message.Message,), dict( - DESCRIPTOR = _MUTATEROWREQUEST, - __module__ = 'google.bigtable.v1.bigtable_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.MutateRowRequest) - )) -_sym_db.RegisterMessage(MutateRowRequest) - -MutateRowsRequest = _reflection.GeneratedProtocolMessageType('MutateRowsRequest', (_message.Message,), dict( - - Entry = _reflection.GeneratedProtocolMessageType('Entry', (_message.Message,), dict( - DESCRIPTOR = _MUTATEROWSREQUEST_ENTRY, - __module__ = 'google.bigtable.v1.bigtable_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.MutateRowsRequest.Entry) - )) - , - DESCRIPTOR = _MUTATEROWSREQUEST, - __module__ = 'google.bigtable.v1.bigtable_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.MutateRowsRequest) - )) -_sym_db.RegisterMessage(MutateRowsRequest) -_sym_db.RegisterMessage(MutateRowsRequest.Entry) - -MutateRowsResponse = _reflection.GeneratedProtocolMessageType('MutateRowsResponse', (_message.Message,), dict( - DESCRIPTOR = _MUTATEROWSRESPONSE, - __module__ = 'google.bigtable.v1.bigtable_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.MutateRowsResponse) - )) -_sym_db.RegisterMessage(MutateRowsResponse) - -CheckAndMutateRowRequest = _reflection.GeneratedProtocolMessageType('CheckAndMutateRowRequest', (_message.Message,), dict( - DESCRIPTOR = _CHECKANDMUTATEROWREQUEST, - __module__ = 'google.bigtable.v1.bigtable_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.CheckAndMutateRowRequest) - )) -_sym_db.RegisterMessage(CheckAndMutateRowRequest) - -CheckAndMutateRowResponse = _reflection.GeneratedProtocolMessageType('CheckAndMutateRowResponse', (_message.Message,), dict( - DESCRIPTOR = _CHECKANDMUTATEROWRESPONSE, - __module__ = 'google.bigtable.v1.bigtable_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.CheckAndMutateRowResponse) - )) -_sym_db.RegisterMessage(CheckAndMutateRowResponse) - -ReadModifyWriteRowRequest = _reflection.GeneratedProtocolMessageType('ReadModifyWriteRowRequest', (_message.Message,), dict( - DESCRIPTOR = _READMODIFYWRITEROWREQUEST, - __module__ = 'google.bigtable.v1.bigtable_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.v1.ReadModifyWriteRowRequest) - )) -_sym_db.RegisterMessage(ReadModifyWriteRowRequest) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\026com.google.bigtable.v1B\034BigtableServiceMessagesProtoP\001') -# @@protoc_insertion_point(module_scope) diff --git 
a/gcloud/bigtable/_generated/bigtable_service_pb2.py b/gcloud/bigtable/_generated/bigtable_service_pb2.py deleted file mode 100644 index 901ffb6103c5..000000000000 --- a/gcloud/bigtable/_generated/bigtable_service_pb2.py +++ /dev/null @@ -1,167 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/bigtable/v1/bigtable_service.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from gcloud.bigtable._generated import bigtable_data_pb2 as google_dot_bigtable_dot_v1_dot_bigtable__data__pb2 -from gcloud.bigtable._generated import bigtable_service_messages_pb2 as google_dot_bigtable_dot_v1_dot_bigtable__service__messages__pb2 -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='google/bigtable/v1/bigtable_service.proto', - package='google.bigtable.v1', - syntax='proto3', - serialized_pb=b'\n)google/bigtable/v1/bigtable_service.proto\x12\x12google.bigtable.v1\x1a\x1cgoogle/api/annotations.proto\x1a&google/bigtable/v1/bigtable_data.proto\x1a\x32google/bigtable/v1/bigtable_service_messages.proto\x1a\x1bgoogle/protobuf/empty.proto2\xdd\x08\n\x0f\x42igtableService\x12\xa5\x01\n\x08ReadRows\x12#.google.bigtable.v1.ReadRowsRequest\x1a$.google.bigtable.v1.ReadRowsResponse\"L\x82\xd3\xe4\x93\x02\x46\"A/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:read:\x01*0\x01\x12\xb7\x01\n\rSampleRowKeys\x12(.google.bigtable.v1.SampleRowKeysRequest\x1a).google.bigtable.v1.SampleRowKeysResponse\"O\x82\xd3\xe4\x93\x02I\x12G/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:sampleKeys0\x01\x12\xa3\x01\n\tMutateRow\x12$.google.bigtable.v1.MutateRowRequest\x1a\x16.google.protobuf.Empty\"X\x82\xd3\xe4\x93\x02R\"M/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:mutate:\x01*\x12\xaa\x01\n\nMutateRows\x12%.google.bigtable.v1.MutateRowsRequest\x1a&.google.bigtable.v1.MutateRowsResponse\"M\x82\xd3\xe4\x93\x02G\"B/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}:mutateRows:\x01*\x12\xd2\x01\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v1.CheckAndMutateRowRequest\x1a-.google.bigtable.v1.CheckAndMutateRowResponse\"`\x82\xd3\xe4\x93\x02Z\"U/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:checkAndMutate:\x01*\x12\xbf\x01\n\x12ReadModifyWriteRow\x12-.google.bigtable.v1.ReadModifyWriteRowRequest\x1a\x17.google.bigtable.v1.Row\"a\x82\xd3\xe4\x93\x02[\"V/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:readModifyWrite:\x01*B4\n\x16\x63om.google.bigtable.v1B\x15\x42igtableServicesProtoP\x01\x88\x01\x01\x62\x06proto3' - , - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_v1_dot_bigtable__data__pb2.DESCRIPTOR,google_dot_bigtable_dot_v1_dot_bigtable__service__messages__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\026com.google.bigtable.v1B\025BigtableServicesProtoP\001\210\001\001') -import abc -from grpc.beta import implementations as 
beta_implementations -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities - -class BetaBigtableServiceServicer(object): - """""" - __metaclass__ = abc.ABCMeta - @abc.abstractmethod - def ReadRows(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def SampleRowKeys(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def MutateRow(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def MutateRows(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def CheckAndMutateRow(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def ReadModifyWriteRow(self, request, context): - raise NotImplementedError() - -class BetaBigtableServiceStub(object): - """The interface to which stubs will conform.""" - __metaclass__ = abc.ABCMeta - @abc.abstractmethod - def ReadRows(self, request, timeout): - raise NotImplementedError() - @abc.abstractmethod - def SampleRowKeys(self, request, timeout): - raise NotImplementedError() - @abc.abstractmethod - def MutateRow(self, request, timeout): - raise NotImplementedError() - MutateRow.future = None - @abc.abstractmethod - def MutateRows(self, request, timeout): - raise NotImplementedError() - MutateRows.future = None - @abc.abstractmethod - def CheckAndMutateRow(self, request, timeout): - raise NotImplementedError() - CheckAndMutateRow.future = None - @abc.abstractmethod - def ReadModifyWriteRow(self, request, timeout): - raise NotImplementedError() - ReadModifyWriteRow.future = None - -def beta_create_BigtableService_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import google.protobuf.empty_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_data_pb2 - request_deserializers = { - ('google.bigtable.v1.BigtableService', 'CheckAndMutateRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.CheckAndMutateRowRequest.FromString, - ('google.bigtable.v1.BigtableService', 'MutateRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.MutateRowRequest.FromString, - ('google.bigtable.v1.BigtableService', 'MutateRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.MutateRowsRequest.FromString, - ('google.bigtable.v1.BigtableService', 'ReadModifyWriteRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.ReadModifyWriteRowRequest.FromString, - ('google.bigtable.v1.BigtableService', 'ReadRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.ReadRowsRequest.FromString, - ('google.bigtable.v1.BigtableService', 'SampleRowKeys'): gcloud.bigtable._generated.bigtable_service_messages_pb2.SampleRowKeysRequest.FromString, - } - response_serializers = { - ('google.bigtable.v1.BigtableService', 'CheckAndMutateRow'): 
gcloud.bigtable._generated.bigtable_service_messages_pb2.CheckAndMutateRowResponse.SerializeToString, - ('google.bigtable.v1.BigtableService', 'MutateRow'): google.protobuf.empty_pb2.Empty.SerializeToString, - ('google.bigtable.v1.BigtableService', 'MutateRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.MutateRowsResponse.SerializeToString, - ('google.bigtable.v1.BigtableService', 'ReadModifyWriteRow'): gcloud.bigtable._generated.bigtable_data_pb2.Row.SerializeToString, - ('google.bigtable.v1.BigtableService', 'ReadRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.ReadRowsResponse.SerializeToString, - ('google.bigtable.v1.BigtableService', 'SampleRowKeys'): gcloud.bigtable._generated.bigtable_service_messages_pb2.SampleRowKeysResponse.SerializeToString, - } - method_implementations = { - ('google.bigtable.v1.BigtableService', 'CheckAndMutateRow'): face_utilities.unary_unary_inline(servicer.CheckAndMutateRow), - ('google.bigtable.v1.BigtableService', 'MutateRow'): face_utilities.unary_unary_inline(servicer.MutateRow), - ('google.bigtable.v1.BigtableService', 'MutateRows'): face_utilities.unary_unary_inline(servicer.MutateRows), - ('google.bigtable.v1.BigtableService', 'ReadModifyWriteRow'): face_utilities.unary_unary_inline(servicer.ReadModifyWriteRow), - ('google.bigtable.v1.BigtableService', 'ReadRows'): face_utilities.unary_stream_inline(servicer.ReadRows), - ('google.bigtable.v1.BigtableService', 'SampleRowKeys'): face_utilities.unary_stream_inline(servicer.SampleRowKeys), - } - server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) - return beta_implementations.server(method_implementations, options=server_options) - -def beta_create_BigtableService_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import google.protobuf.empty_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_data_pb2 - request_serializers = { - ('google.bigtable.v1.BigtableService', 'CheckAndMutateRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.CheckAndMutateRowRequest.SerializeToString, - ('google.bigtable.v1.BigtableService', 'MutateRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.MutateRowRequest.SerializeToString, - ('google.bigtable.v1.BigtableService', 'MutateRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.MutateRowsRequest.SerializeToString, - ('google.bigtable.v1.BigtableService', 'ReadModifyWriteRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.ReadModifyWriteRowRequest.SerializeToString, - ('google.bigtable.v1.BigtableService', 'ReadRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.ReadRowsRequest.SerializeToString, - 
('google.bigtable.v1.BigtableService', 'SampleRowKeys'): gcloud.bigtable._generated.bigtable_service_messages_pb2.SampleRowKeysRequest.SerializeToString, - } - response_deserializers = { - ('google.bigtable.v1.BigtableService', 'CheckAndMutateRow'): gcloud.bigtable._generated.bigtable_service_messages_pb2.CheckAndMutateRowResponse.FromString, - ('google.bigtable.v1.BigtableService', 'MutateRow'): google.protobuf.empty_pb2.Empty.FromString, - ('google.bigtable.v1.BigtableService', 'MutateRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.MutateRowsResponse.FromString, - ('google.bigtable.v1.BigtableService', 'ReadModifyWriteRow'): gcloud.bigtable._generated.bigtable_data_pb2.Row.FromString, - ('google.bigtable.v1.BigtableService', 'ReadRows'): gcloud.bigtable._generated.bigtable_service_messages_pb2.ReadRowsResponse.FromString, - ('google.bigtable.v1.BigtableService', 'SampleRowKeys'): gcloud.bigtable._generated.bigtable_service_messages_pb2.SampleRowKeysResponse.FromString, - } - cardinalities = { - 'CheckAndMutateRow': cardinality.Cardinality.UNARY_UNARY, - 'MutateRow': cardinality.Cardinality.UNARY_UNARY, - 'MutateRows': cardinality.Cardinality.UNARY_UNARY, - 'ReadModifyWriteRow': cardinality.Cardinality.UNARY_UNARY, - 'ReadRows': cardinality.Cardinality.UNARY_STREAM, - 'SampleRowKeys': cardinality.Cardinality.UNARY_STREAM, - } - stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) - return beta_implementations.dynamic_stub(channel, 'google.bigtable.v1.BigtableService', cardinalities, options=stub_options) -# @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/_generated_v2/bigtable_table_admin_pb2.py b/gcloud/bigtable/_generated/bigtable_table_admin_pb2.py similarity index 99% rename from gcloud/bigtable/_generated_v2/bigtable_table_admin_pb2.py rename to gcloud/bigtable/_generated/bigtable_table_admin_pb2.py index c929b222b78b..e57a26f3512c 100644 --- a/gcloud/bigtable/_generated_v2/bigtable_table_admin_pb2.py +++ b/gcloud/bigtable/_generated/bigtable_table_admin_pb2.py @@ -14,7 +14,7 @@ from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from gcloud.bigtable._generated_v2 import table_pb2 as google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2 +from gcloud.bigtable._generated import table_pb2 as google_dot_bigtable_dot_admin_dot_v2_dot_table__pb2 from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 diff --git a/gcloud/bigtable/_generated/bigtable_table_data_pb2.py b/gcloud/bigtable/_generated/bigtable_table_data_pb2.py deleted file mode 100644 index fd47b567b3c5..000000000000 --- a/gcloud/bigtable/_generated/bigtable_table_data_pb2.py +++ /dev/null @@ -1,377 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/bigtable/admin/table/v1/bigtable_table_data.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2 -from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='google/bigtable/admin/table/v1/bigtable_table_data.proto', - package='google.bigtable.admin.table.v1', - syntax='proto3', - serialized_pb=b'\n8google/bigtable/admin/table/v1/bigtable_table_data.proto\x12\x1egoogle.bigtable.admin.table.v1\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\"\xfd\x02\n\x05Table\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\x11\x63urrent_operation\x18\x02 \x01(\x0b\x32\x1d.google.longrunning.Operation\x12R\n\x0f\x63olumn_families\x18\x03 \x03(\x0b\x32\x39.google.bigtable.admin.table.v1.Table.ColumnFamiliesEntry\x12O\n\x0bgranularity\x18\x04 \x01(\x0e\x32:.google.bigtable.admin.table.v1.Table.TimestampGranularity\x1a\x63\n\x13\x43olumnFamiliesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12;\n\x05value\x18\x02 \x01(\x0b\x32,.google.bigtable.admin.table.v1.ColumnFamily:\x02\x38\x01\"\"\n\x14TimestampGranularity\x12\n\n\x06MILLIS\x10\x00\"l\n\x0c\x43olumnFamily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x15\n\rgc_expression\x18\x02 \x01(\t\x12\x37\n\x07gc_rule\x18\x03 \x01(\x0b\x32&.google.bigtable.admin.table.v1.GcRule\"\xed\x02\n\x06GcRule\x12\x1a\n\x10max_num_versions\x18\x01 \x01(\x05H\x00\x12,\n\x07max_age\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationH\x00\x12K\n\x0cintersection\x18\x03 \x01(\x0b\x32\x33.google.bigtable.admin.table.v1.GcRule.IntersectionH\x00\x12=\n\x05union\x18\x04 \x01(\x0b\x32,.google.bigtable.admin.table.v1.GcRule.UnionH\x00\x1a\x45\n\x0cIntersection\x12\x35\n\x05rules\x18\x01 \x03(\x0b\x32&.google.bigtable.admin.table.v1.GcRule\x1a>\n\x05Union\x12\x35\n\x05rules\x18\x01 \x03(\x0b\x32&.google.bigtable.admin.table.v1.GcRuleB\x06\n\x04ruleB>\n\"com.google.bigtable.admin.table.v1B\x16\x42igtableTableDataProtoP\x01\x62\x06proto3' - , - dependencies=[google_dot_longrunning_dot_operations__pb2.DESCRIPTOR,google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - -_TABLE_TIMESTAMPGRANULARITY = _descriptor.EnumDescriptor( - name='TimestampGranularity', - full_name='google.bigtable.admin.table.v1.Table.TimestampGranularity', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='MILLIS', index=0, number=0, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=509, - serialized_end=543, -) -_sym_db.RegisterEnumDescriptor(_TABLE_TIMESTAMPGRANULARITY) - - -_TABLE_COLUMNFAMILIESENTRY = _descriptor.Descriptor( - name='ColumnFamiliesEntry', - full_name='google.bigtable.admin.table.v1.Table.ColumnFamiliesEntry', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='google.bigtable.admin.table.v1.Table.ColumnFamiliesEntry.key', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - 
is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value', full_name='google.bigtable.admin.table.v1.Table.ColumnFamiliesEntry.value', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\001'), - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=408, - serialized_end=507, -) - -_TABLE = _descriptor.Descriptor( - name='Table', - full_name='google.bigtable.admin.table.v1.Table', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.table.v1.Table.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='current_operation', full_name='google.bigtable.admin.table.v1.Table.current_operation', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='column_families', full_name='google.bigtable.admin.table.v1.Table.column_families', index=2, - number=3, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='granularity', full_name='google.bigtable.admin.table.v1.Table.granularity', index=3, - number=4, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_TABLE_COLUMNFAMILIESENTRY, ], - enum_types=[ - _TABLE_TIMESTAMPGRANULARITY, - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=162, - serialized_end=543, -) - - -_COLUMNFAMILY = _descriptor.Descriptor( - name='ColumnFamily', - full_name='google.bigtable.admin.table.v1.ColumnFamily', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.table.v1.ColumnFamily.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='gc_expression', full_name='google.bigtable.admin.table.v1.ColumnFamily.gc_expression', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='gc_rule', full_name='google.bigtable.admin.table.v1.ColumnFamily.gc_rule', index=2, - number=3, type=11, cpp_type=10, label=1, - 
has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=545, - serialized_end=653, -) - - -_GCRULE_INTERSECTION = _descriptor.Descriptor( - name='Intersection', - full_name='google.bigtable.admin.table.v1.GcRule.Intersection', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='rules', full_name='google.bigtable.admin.table.v1.GcRule.Intersection.rules', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=880, - serialized_end=949, -) - -_GCRULE_UNION = _descriptor.Descriptor( - name='Union', - full_name='google.bigtable.admin.table.v1.GcRule.Union', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='rules', full_name='google.bigtable.admin.table.v1.GcRule.Union.rules', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=951, - serialized_end=1013, -) - -_GCRULE = _descriptor.Descriptor( - name='GcRule', - full_name='google.bigtable.admin.table.v1.GcRule', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='max_num_versions', full_name='google.bigtable.admin.table.v1.GcRule.max_num_versions', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='max_age', full_name='google.bigtable.admin.table.v1.GcRule.max_age', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='intersection', full_name='google.bigtable.admin.table.v1.GcRule.intersection', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='union', full_name='google.bigtable.admin.table.v1.GcRule.union', index=3, - number=4, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[_GCRULE_INTERSECTION, _GCRULE_UNION, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - 
oneofs=[ - _descriptor.OneofDescriptor( - name='rule', full_name='google.bigtable.admin.table.v1.GcRule.rule', - index=0, containing_type=None, fields=[]), - ], - serialized_start=656, - serialized_end=1021, -) - -_TABLE_COLUMNFAMILIESENTRY.fields_by_name['value'].message_type = _COLUMNFAMILY -_TABLE_COLUMNFAMILIESENTRY.containing_type = _TABLE -_TABLE.fields_by_name['current_operation'].message_type = google_dot_longrunning_dot_operations__pb2._OPERATION -_TABLE.fields_by_name['column_families'].message_type = _TABLE_COLUMNFAMILIESENTRY -_TABLE.fields_by_name['granularity'].enum_type = _TABLE_TIMESTAMPGRANULARITY -_TABLE_TIMESTAMPGRANULARITY.containing_type = _TABLE -_COLUMNFAMILY.fields_by_name['gc_rule'].message_type = _GCRULE -_GCRULE_INTERSECTION.fields_by_name['rules'].message_type = _GCRULE -_GCRULE_INTERSECTION.containing_type = _GCRULE -_GCRULE_UNION.fields_by_name['rules'].message_type = _GCRULE -_GCRULE_UNION.containing_type = _GCRULE -_GCRULE.fields_by_name['max_age'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION -_GCRULE.fields_by_name['intersection'].message_type = _GCRULE_INTERSECTION -_GCRULE.fields_by_name['union'].message_type = _GCRULE_UNION -_GCRULE.oneofs_by_name['rule'].fields.append( - _GCRULE.fields_by_name['max_num_versions']) -_GCRULE.fields_by_name['max_num_versions'].containing_oneof = _GCRULE.oneofs_by_name['rule'] -_GCRULE.oneofs_by_name['rule'].fields.append( - _GCRULE.fields_by_name['max_age']) -_GCRULE.fields_by_name['max_age'].containing_oneof = _GCRULE.oneofs_by_name['rule'] -_GCRULE.oneofs_by_name['rule'].fields.append( - _GCRULE.fields_by_name['intersection']) -_GCRULE.fields_by_name['intersection'].containing_oneof = _GCRULE.oneofs_by_name['rule'] -_GCRULE.oneofs_by_name['rule'].fields.append( - _GCRULE.fields_by_name['union']) -_GCRULE.fields_by_name['union'].containing_oneof = _GCRULE.oneofs_by_name['rule'] -DESCRIPTOR.message_types_by_name['Table'] = _TABLE -DESCRIPTOR.message_types_by_name['ColumnFamily'] = _COLUMNFAMILY -DESCRIPTOR.message_types_by_name['GcRule'] = _GCRULE - -Table = _reflection.GeneratedProtocolMessageType('Table', (_message.Message,), dict( - - ColumnFamiliesEntry = _reflection.GeneratedProtocolMessageType('ColumnFamiliesEntry', (_message.Message,), dict( - DESCRIPTOR = _TABLE_COLUMNFAMILIESENTRY, - __module__ = 'google.bigtable.admin.table.v1.bigtable_table_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.Table.ColumnFamiliesEntry) - )) - , - DESCRIPTOR = _TABLE, - __module__ = 'google.bigtable.admin.table.v1.bigtable_table_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.Table) - )) -_sym_db.RegisterMessage(Table) -_sym_db.RegisterMessage(Table.ColumnFamiliesEntry) - -ColumnFamily = _reflection.GeneratedProtocolMessageType('ColumnFamily', (_message.Message,), dict( - DESCRIPTOR = _COLUMNFAMILY, - __module__ = 'google.bigtable.admin.table.v1.bigtable_table_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.ColumnFamily) - )) -_sym_db.RegisterMessage(ColumnFamily) - -GcRule = _reflection.GeneratedProtocolMessageType('GcRule', (_message.Message,), dict( - - Intersection = _reflection.GeneratedProtocolMessageType('Intersection', (_message.Message,), dict( - DESCRIPTOR = _GCRULE_INTERSECTION, - __module__ = 'google.bigtable.admin.table.v1.bigtable_table_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.GcRule.Intersection) - )) - , - - Union = 
_reflection.GeneratedProtocolMessageType('Union', (_message.Message,), dict( - DESCRIPTOR = _GCRULE_UNION, - __module__ = 'google.bigtable.admin.table.v1.bigtable_table_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.GcRule.Union) - )) - , - DESCRIPTOR = _GCRULE, - __module__ = 'google.bigtable.admin.table.v1.bigtable_table_data_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.GcRule) - )) -_sym_db.RegisterMessage(GcRule) -_sym_db.RegisterMessage(GcRule.Intersection) -_sym_db.RegisterMessage(GcRule.Union) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\"com.google.bigtable.admin.table.v1B\026BigtableTableDataProtoP\001') -_TABLE_COLUMNFAMILIESENTRY.has_options = True -_TABLE_COLUMNFAMILIESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\001') -# @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/_generated/bigtable_table_service_messages_pb2.py b/gcloud/bigtable/_generated/bigtable_table_service_messages_pb2.py deleted file mode 100644 index 582dfed94612..000000000000 --- a/gcloud/bigtable/_generated/bigtable_table_service_messages_pb2.py +++ /dev/null @@ -1,389 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! -# source: google/bigtable/admin/table/v1/bigtable_table_service_messages.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from gcloud.bigtable._generated import bigtable_table_data_pb2 as google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='google/bigtable/admin/table/v1/bigtable_table_service_messages.proto', - package='google.bigtable.admin.table.v1', - syntax='proto3', - serialized_pb=b'\nDgoogle/bigtable/admin/table/v1/bigtable_table_service_messages.proto\x12\x1egoogle.bigtable.admin.table.v1\x1a\x38google/bigtable/admin/table/v1/bigtable_table_data.proto\"\x86\x01\n\x12\x43reateTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08table_id\x18\x02 \x01(\t\x12\x34\n\x05table\x18\x03 \x01(\x0b\x32%.google.bigtable.admin.table.v1.Table\x12\x1a\n\x12initial_split_keys\x18\x04 \x03(\t\"!\n\x11ListTablesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"K\n\x12ListTablesResponse\x12\x35\n\x06tables\x18\x01 \x03(\x0b\x32%.google.bigtable.admin.table.v1.Table\"\x1f\n\x0fGetTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\"\n\x12\x44\x65leteTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"2\n\x12RenameTableRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06new_id\x18\x02 \x01(\t\"\x88\x01\n\x19\x43reateColumnFamilyRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_family_id\x18\x02 \x01(\t\x12\x43\n\rcolumn_family\x18\x03 \x01(\x0b\x32,.google.bigtable.admin.table.v1.ColumnFamily\")\n\x19\x44\x65leteColumnFamilyRequest\x12\x0c\n\x04name\x18\x01 \x01(\tBI\n\"com.google.bigtable.admin.table.v1B!BigtableTableServiceMessagesProtoP\x01\x62\x06proto3' - , - dependencies=[google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - - -_CREATETABLEREQUEST = _descriptor.Descriptor( - name='CreateTableRequest', - 
full_name='google.bigtable.admin.table.v1.CreateTableRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.table.v1.CreateTableRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='table_id', full_name='google.bigtable.admin.table.v1.CreateTableRequest.table_id', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='table', full_name='google.bigtable.admin.table.v1.CreateTableRequest.table', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='initial_split_keys', full_name='google.bigtable.admin.table.v1.CreateTableRequest.initial_split_keys', index=3, - number=4, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=163, - serialized_end=297, -) - - -_LISTTABLESREQUEST = _descriptor.Descriptor( - name='ListTablesRequest', - full_name='google.bigtable.admin.table.v1.ListTablesRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.table.v1.ListTablesRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=299, - serialized_end=332, -) - - -_LISTTABLESRESPONSE = _descriptor.Descriptor( - name='ListTablesResponse', - full_name='google.bigtable.admin.table.v1.ListTablesResponse', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='tables', full_name='google.bigtable.admin.table.v1.ListTablesResponse.tables', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=334, - serialized_end=409, -) - - -_GETTABLEREQUEST = _descriptor.Descriptor( - name='GetTableRequest', - full_name='google.bigtable.admin.table.v1.GetTableRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', 
full_name='google.bigtable.admin.table.v1.GetTableRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=411, - serialized_end=442, -) - - -_DELETETABLEREQUEST = _descriptor.Descriptor( - name='DeleteTableRequest', - full_name='google.bigtable.admin.table.v1.DeleteTableRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.table.v1.DeleteTableRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=444, - serialized_end=478, -) - - -_RENAMETABLEREQUEST = _descriptor.Descriptor( - name='RenameTableRequest', - full_name='google.bigtable.admin.table.v1.RenameTableRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.table.v1.RenameTableRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='new_id', full_name='google.bigtable.admin.table.v1.RenameTableRequest.new_id', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=480, - serialized_end=530, -) - - -_CREATECOLUMNFAMILYREQUEST = _descriptor.Descriptor( - name='CreateColumnFamilyRequest', - full_name='google.bigtable.admin.table.v1.CreateColumnFamilyRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.table.v1.CreateColumnFamilyRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='column_family_id', full_name='google.bigtable.admin.table.v1.CreateColumnFamilyRequest.column_family_id', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='column_family', full_name='google.bigtable.admin.table.v1.CreateColumnFamilyRequest.column_family', index=2, - number=3, type=11, cpp_type=10, 
label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=533, - serialized_end=669, -) - - -_DELETECOLUMNFAMILYREQUEST = _descriptor.Descriptor( - name='DeleteColumnFamilyRequest', - full_name='google.bigtable.admin.table.v1.DeleteColumnFamilyRequest', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='google.bigtable.admin.table.v1.DeleteColumnFamilyRequest.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=671, - serialized_end=712, -) - -_CREATETABLEREQUEST.fields_by_name['table'].message_type = google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2._TABLE -_LISTTABLESRESPONSE.fields_by_name['tables'].message_type = google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2._TABLE -_CREATECOLUMNFAMILYREQUEST.fields_by_name['column_family'].message_type = google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2._COLUMNFAMILY -DESCRIPTOR.message_types_by_name['CreateTableRequest'] = _CREATETABLEREQUEST -DESCRIPTOR.message_types_by_name['ListTablesRequest'] = _LISTTABLESREQUEST -DESCRIPTOR.message_types_by_name['ListTablesResponse'] = _LISTTABLESRESPONSE -DESCRIPTOR.message_types_by_name['GetTableRequest'] = _GETTABLEREQUEST -DESCRIPTOR.message_types_by_name['DeleteTableRequest'] = _DELETETABLEREQUEST -DESCRIPTOR.message_types_by_name['RenameTableRequest'] = _RENAMETABLEREQUEST -DESCRIPTOR.message_types_by_name['CreateColumnFamilyRequest'] = _CREATECOLUMNFAMILYREQUEST -DESCRIPTOR.message_types_by_name['DeleteColumnFamilyRequest'] = _DELETECOLUMNFAMILYREQUEST - -CreateTableRequest = _reflection.GeneratedProtocolMessageType('CreateTableRequest', (_message.Message,), dict( - DESCRIPTOR = _CREATETABLEREQUEST, - __module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.CreateTableRequest) - )) -_sym_db.RegisterMessage(CreateTableRequest) - -ListTablesRequest = _reflection.GeneratedProtocolMessageType('ListTablesRequest', (_message.Message,), dict( - DESCRIPTOR = _LISTTABLESREQUEST, - __module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.ListTablesRequest) - )) -_sym_db.RegisterMessage(ListTablesRequest) - -ListTablesResponse = _reflection.GeneratedProtocolMessageType('ListTablesResponse', (_message.Message,), dict( - DESCRIPTOR = _LISTTABLESRESPONSE, - __module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.ListTablesResponse) - )) -_sym_db.RegisterMessage(ListTablesResponse) - -GetTableRequest = _reflection.GeneratedProtocolMessageType('GetTableRequest', (_message.Message,), dict( - DESCRIPTOR = _GETTABLEREQUEST, 
- __module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.GetTableRequest) - )) -_sym_db.RegisterMessage(GetTableRequest) - -DeleteTableRequest = _reflection.GeneratedProtocolMessageType('DeleteTableRequest', (_message.Message,), dict( - DESCRIPTOR = _DELETETABLEREQUEST, - __module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.DeleteTableRequest) - )) -_sym_db.RegisterMessage(DeleteTableRequest) - -RenameTableRequest = _reflection.GeneratedProtocolMessageType('RenameTableRequest', (_message.Message,), dict( - DESCRIPTOR = _RENAMETABLEREQUEST, - __module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.RenameTableRequest) - )) -_sym_db.RegisterMessage(RenameTableRequest) - -CreateColumnFamilyRequest = _reflection.GeneratedProtocolMessageType('CreateColumnFamilyRequest', (_message.Message,), dict( - DESCRIPTOR = _CREATECOLUMNFAMILYREQUEST, - __module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.CreateColumnFamilyRequest) - )) -_sym_db.RegisterMessage(CreateColumnFamilyRequest) - -DeleteColumnFamilyRequest = _reflection.GeneratedProtocolMessageType('DeleteColumnFamilyRequest', (_message.Message,), dict( - DESCRIPTOR = _DELETECOLUMNFAMILYREQUEST, - __module__ = 'google.bigtable.admin.table.v1.bigtable_table_service_messages_pb2' - # @@protoc_insertion_point(class_scope:google.bigtable.admin.table.v1.DeleteColumnFamilyRequest) - )) -_sym_db.RegisterMessage(DeleteColumnFamilyRequest) - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\"com.google.bigtable.admin.table.v1B!BigtableTableServiceMessagesProtoP\001') -# @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/_generated/bigtable_table_service_pb2.py b/gcloud/bigtable/_generated/bigtable_table_service_pb2.py deleted file mode 100644 index c77a09296fa4..000000000000 --- a/gcloud/bigtable/_generated/bigtable_table_service_pb2.py +++ /dev/null @@ -1,203 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: google/bigtable/admin/table/v1/bigtable_table_service.proto - -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from gcloud.bigtable._generated import bigtable_table_data_pb2 as google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2 -from gcloud.bigtable._generated import bigtable_table_service_messages_pb2 as google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__service__messages__pb2 -from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='google/bigtable/admin/table/v1/bigtable_table_service.proto', - package='google.bigtable.admin.table.v1', - syntax='proto3', - serialized_pb=b'\n;google/bigtable/admin/table/v1/bigtable_table_service.proto\x12\x1egoogle.bigtable.admin.table.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x38google/bigtable/admin/table/v1/bigtable_table_data.proto\x1a\x44google/bigtable/admin/table/v1/bigtable_table_service_messages.proto\x1a\x1bgoogle/protobuf/empty.proto2\x89\x0b\n\x14\x42igtableTableService\x12\xa4\x01\n\x0b\x43reateTable\x12\x32.google.bigtable.admin.table.v1.CreateTableRequest\x1a%.google.bigtable.admin.table.v1.Table\":\x82\xd3\xe4\x93\x02\x34\"//v1/{name=projects/*/zones/*/clusters/*}/tables:\x01*\x12\xac\x01\n\nListTables\x12\x31.google.bigtable.admin.table.v1.ListTablesRequest\x1a\x32.google.bigtable.admin.table.v1.ListTablesResponse\"7\x82\xd3\xe4\x93\x02\x31\x12//v1/{name=projects/*/zones/*/clusters/*}/tables\x12\x9d\x01\n\x08GetTable\x12/.google.bigtable.admin.table.v1.GetTableRequest\x1a%.google.bigtable.admin.table.v1.Table\"9\x82\xd3\xe4\x93\x02\x33\x12\x31/v1/{name=projects/*/zones/*/clusters/*/tables/*}\x12\x94\x01\n\x0b\x44\x65leteTable\x12\x32.google.bigtable.admin.table.v1.DeleteTableRequest\x1a\x16.google.protobuf.Empty\"9\x82\xd3\xe4\x93\x02\x33*1/v1/{name=projects/*/zones/*/clusters/*/tables/*}\x12\x9e\x01\n\x0bRenameTable\x12\x32.google.bigtable.admin.table.v1.RenameTableRequest\x1a\x16.google.protobuf.Empty\"C\x82\xd3\xe4\x93\x02=\"8/v1/{name=projects/*/zones/*/clusters/*/tables/*}:rename:\x01*\x12\xca\x01\n\x12\x43reateColumnFamily\x12\x39.google.bigtable.admin.table.v1.CreateColumnFamilyRequest\x1a,.google.bigtable.admin.table.v1.ColumnFamily\"K\x82\xd3\xe4\x93\x02\x45\"@/v1/{name=projects/*/zones/*/clusters/*/tables/*}/columnFamilies:\x01*\x12\xbf\x01\n\x12UpdateColumnFamily\x12,.google.bigtable.admin.table.v1.ColumnFamily\x1a,.google.bigtable.admin.table.v1.ColumnFamily\"M\x82\xd3\xe4\x93\x02G\x1a\x42/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}:\x01*\x12\xb3\x01\n\x12\x44\x65leteColumnFamily\x12\x39.google.bigtable.admin.table.v1.DeleteColumnFamilyRequest\x1a\x16.google.protobuf.Empty\"J\x82\xd3\xe4\x93\x02\x44*B/v1/{name=projects/*/zones/*/clusters/*/tables/*/columnFamilies/*}BB\n\"com.google.bigtable.admin.table.v1B\x1a\x42igtableTableServicesProtoP\x01\x62\x06proto3' - , - 
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__data__pb2.DESCRIPTOR,google_dot_bigtable_dot_admin_dot_table_dot_v1_dot_bigtable__table__service__messages__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,]) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - - - - - -DESCRIPTOR.has_options = True -DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\"com.google.bigtable.admin.table.v1B\032BigtableTableServicesProtoP\001') -import abc -from grpc.beta import implementations as beta_implementations -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities - -class BetaBigtableTableServiceServicer(object): - """""" - __metaclass__ = abc.ABCMeta - @abc.abstractmethod - def CreateTable(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def ListTables(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def GetTable(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def DeleteTable(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def RenameTable(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def CreateColumnFamily(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def UpdateColumnFamily(self, request, context): - raise NotImplementedError() - @abc.abstractmethod - def DeleteColumnFamily(self, request, context): - raise NotImplementedError() - -class BetaBigtableTableServiceStub(object): - """The interface to which stubs will conform.""" - __metaclass__ = abc.ABCMeta - @abc.abstractmethod - def CreateTable(self, request, timeout): - raise NotImplementedError() - CreateTable.future = None - @abc.abstractmethod - def ListTables(self, request, timeout): - raise NotImplementedError() - ListTables.future = None - @abc.abstractmethod - def GetTable(self, request, timeout): - raise NotImplementedError() - GetTable.future = None - @abc.abstractmethod - def DeleteTable(self, request, timeout): - raise NotImplementedError() - DeleteTable.future = None - @abc.abstractmethod - def RenameTable(self, request, timeout): - raise NotImplementedError() - RenameTable.future = None - @abc.abstractmethod - def CreateColumnFamily(self, request, timeout): - raise NotImplementedError() - CreateColumnFamily.future = None - @abc.abstractmethod - def UpdateColumnFamily(self, request, timeout): - raise NotImplementedError() - UpdateColumnFamily.future = None - @abc.abstractmethod - def DeleteColumnFamily(self, request, timeout): - raise NotImplementedError() - DeleteColumnFamily.future = None - -def beta_create_BigtableTableService_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_table_data_pb2 - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_table_data_pb2 - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import google.protobuf.empty_pb2 - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import google.protobuf.empty_pb2 - import 
gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_table_data_pb2 - import gcloud.bigtable._generated.bigtable_table_data_pb2 - import gcloud.bigtable._generated.bigtable_table_data_pb2 - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import google.protobuf.empty_pb2 - request_deserializers = { - ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateColumnFamily'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.CreateColumnFamilyRequest.FromString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.CreateTableRequest.FromString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteColumnFamily'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.DeleteColumnFamilyRequest.FromString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.DeleteTableRequest.FromString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'GetTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.GetTableRequest.FromString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'ListTables'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.ListTablesRequest.FromString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'RenameTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.RenameTableRequest.FromString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'UpdateColumnFamily'): gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.FromString, - } - response_serializers = { - ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateColumnFamily'): gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.SerializeToString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateTable'): gcloud.bigtable._generated.bigtable_table_data_pb2.Table.SerializeToString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteColumnFamily'): google.protobuf.empty_pb2.Empty.SerializeToString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteTable'): google.protobuf.empty_pb2.Empty.SerializeToString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'GetTable'): gcloud.bigtable._generated.bigtable_table_data_pb2.Table.SerializeToString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'ListTables'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.ListTablesResponse.SerializeToString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'RenameTable'): google.protobuf.empty_pb2.Empty.SerializeToString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'UpdateColumnFamily'): gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.SerializeToString, - } - method_implementations = { - ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateColumnFamily'): face_utilities.unary_unary_inline(servicer.CreateColumnFamily), - ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateTable'): face_utilities.unary_unary_inline(servicer.CreateTable), - ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteColumnFamily'): face_utilities.unary_unary_inline(servicer.DeleteColumnFamily), - ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteTable'): face_utilities.unary_unary_inline(servicer.DeleteTable), - 
('google.bigtable.admin.table.v1.BigtableTableService', 'GetTable'): face_utilities.unary_unary_inline(servicer.GetTable), - ('google.bigtable.admin.table.v1.BigtableTableService', 'ListTables'): face_utilities.unary_unary_inline(servicer.ListTables), - ('google.bigtable.admin.table.v1.BigtableTableService', 'RenameTable'): face_utilities.unary_unary_inline(servicer.RenameTable), - ('google.bigtable.admin.table.v1.BigtableTableService', 'UpdateColumnFamily'): face_utilities.unary_unary_inline(servicer.UpdateColumnFamily), - } - server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) - return beta_implementations.server(method_implementations, options=server_options) - -def beta_create_BigtableTableService_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_table_data_pb2 - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_table_data_pb2 - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import google.protobuf.empty_pb2 - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import google.protobuf.empty_pb2 - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import gcloud.bigtable._generated.bigtable_table_data_pb2 - import gcloud.bigtable._generated.bigtable_table_data_pb2 - import gcloud.bigtable._generated.bigtable_table_data_pb2 - import gcloud.bigtable._generated.bigtable_table_service_messages_pb2 - import google.protobuf.empty_pb2 - request_serializers = { - ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateColumnFamily'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.CreateColumnFamilyRequest.SerializeToString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.CreateTableRequest.SerializeToString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteColumnFamily'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.DeleteColumnFamilyRequest.SerializeToString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.DeleteTableRequest.SerializeToString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'GetTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.GetTableRequest.SerializeToString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'ListTables'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.ListTablesRequest.SerializeToString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'RenameTable'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.RenameTableRequest.SerializeToString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'UpdateColumnFamily'): gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.SerializeToString, - } - response_deserializers = { - ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateColumnFamily'): 
gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.FromString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'CreateTable'): gcloud.bigtable._generated.bigtable_table_data_pb2.Table.FromString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteColumnFamily'): google.protobuf.empty_pb2.Empty.FromString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'DeleteTable'): google.protobuf.empty_pb2.Empty.FromString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'GetTable'): gcloud.bigtable._generated.bigtable_table_data_pb2.Table.FromString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'ListTables'): gcloud.bigtable._generated.bigtable_table_service_messages_pb2.ListTablesResponse.FromString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'RenameTable'): google.protobuf.empty_pb2.Empty.FromString, - ('google.bigtable.admin.table.v1.BigtableTableService', 'UpdateColumnFamily'): gcloud.bigtable._generated.bigtable_table_data_pb2.ColumnFamily.FromString, - } - cardinalities = { - 'CreateColumnFamily': cardinality.Cardinality.UNARY_UNARY, - 'CreateTable': cardinality.Cardinality.UNARY_UNARY, - 'DeleteColumnFamily': cardinality.Cardinality.UNARY_UNARY, - 'DeleteTable': cardinality.Cardinality.UNARY_UNARY, - 'GetTable': cardinality.Cardinality.UNARY_UNARY, - 'ListTables': cardinality.Cardinality.UNARY_UNARY, - 'RenameTable': cardinality.Cardinality.UNARY_UNARY, - 'UpdateColumnFamily': cardinality.Cardinality.UNARY_UNARY, - } - stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) - return beta_implementations.dynamic_stub(channel, 'google.bigtable.admin.table.v1.BigtableTableService', cardinalities, options=stub_options) -# @@protoc_insertion_point(module_scope) diff --git a/gcloud/bigtable/_generated_v2/common_pb2.py b/gcloud/bigtable/_generated/common_pb2.py similarity index 100% rename from gcloud/bigtable/_generated_v2/common_pb2.py rename to gcloud/bigtable/_generated/common_pb2.py diff --git a/gcloud/bigtable/_generated_v2/data_pb2.py b/gcloud/bigtable/_generated/data_pb2.py similarity index 100% rename from gcloud/bigtable/_generated_v2/data_pb2.py rename to gcloud/bigtable/_generated/data_pb2.py diff --git a/gcloud/bigtable/_generated_v2/instance_pb2.py b/gcloud/bigtable/_generated/instance_pb2.py similarity index 98% rename from gcloud/bigtable/_generated_v2/instance_pb2.py rename to gcloud/bigtable/_generated/instance_pb2.py index 2161bf33bf58..b34379864f8b 100644 --- a/gcloud/bigtable/_generated_v2/instance_pb2.py +++ b/gcloud/bigtable/_generated/instance_pb2.py @@ -14,7 +14,7 @@ from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 -from gcloud.bigtable._generated_v2 import common_pb2 as google_dot_bigtable_dot_admin_dot_v2_dot_common__pb2 +from gcloud.bigtable._generated import common_pb2 as google_dot_bigtable_dot_admin_dot_v2_dot_common__pb2 DESCRIPTOR = _descriptor.FileDescriptor( diff --git a/gcloud/bigtable/_generated/operations_grpc_pb2.py b/gcloud/bigtable/_generated/operations_grpc_pb2.py index e4911b389f25..5723e1d99fe0 100644 --- a/gcloud/bigtable/_generated/operations_grpc_pb2.py +++ b/gcloud/bigtable/_generated/operations_grpc_pb2.py @@ -1,64 +1,235 @@ -import abc +from google.longrunning.operations_pb2 import ( + CancelOperationRequest, + DeleteOperationRequest, + GetOperationRequest, + 
ListOperationsRequest, + ListOperationsResponse, + Operation, + google_dot_protobuf_dot_empty__pb2, +) from grpc.beta import implementations as beta_implementations +from grpc.beta import interfaces as beta_interfaces from grpc.framework.common import cardinality from grpc.framework.interfaces.face import utilities as face_utilities + +class OperationsStub(object): + """Manages long-running operations with an API service. + + When an API method normally takes long time to complete, it can be designed + to return [Operation][google.longrunning.Operation] to the client, and the client can use this + interface to receive the real response asynchronously by polling the + operation resource, or using `google.watcher.v1.Watcher` interface to watch + the response, or pass the operation resource to another API (such as Google + Cloud Pub/Sub API) to receive the response. Any API service that returns + long-running operations should implement the `Operations` interface so + developers can have a consistent client experience. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.GetOperation = channel.unary_unary( + '/google.longrunning.Operations/GetOperation', + request_serializer=GetOperationRequest.SerializeToString, + response_deserializer=Operation.FromString, + ) + self.ListOperations = channel.unary_unary( + '/google.longrunning.Operations/ListOperations', + request_serializer=ListOperationsRequest.SerializeToString, + response_deserializer=ListOperationsResponse.FromString, + ) + self.CancelOperation = channel.unary_unary( + '/google.longrunning.Operations/CancelOperation', + request_serializer=CancelOperationRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + self.DeleteOperation = channel.unary_unary( + '/google.longrunning.Operations/DeleteOperation', + request_serializer=DeleteOperationRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + + +class OperationsServicer(object): + """Manages long-running operations with an API service. + + When an API method normally takes long time to complete, it can be designed + to return [Operation][google.longrunning.Operation] to the client, and the client can use this + interface to receive the real response asynchronously by polling the + operation resource, or using `google.watcher.v1.Watcher` interface to watch + the response, or pass the operation resource to another API (such as Google + Cloud Pub/Sub API) to receive the response. Any API service that returns + long-running operations should implement the `Operations` interface so + developers can have a consistent client experience. + """ + + def GetOperation(self, request, context): + """Gets the latest state of a long-running operation. Clients may use this + method to poll the operation result at intervals as recommended by the API + service. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListOperations(self, request, context): + """Lists operations that match the specified filter in the request. If the + server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CancelOperation(self, request, context): + """Starts asynchronous cancellation on a long-running operation. The server + makes a best effort to cancel the operation, but success is not + guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. Clients may use + [Operations.GetOperation] or other methods to check whether the + cancellation succeeded or the operation completed despite cancellation. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeleteOperation(self, request, context): + """Deletes a long-running operation. It indicates the client is no longer + interested in the operation result. It does not cancel the operation. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_OperationsServicer_to_server(servicer, server): + rpc_method_handlers = { + 'GetOperation': grpc.unary_unary_rpc_method_handler( + servicer.GetOperation, + request_deserializer=GetOperationRequest.FromString, + response_serializer=Operation.SerializeToString, + ), + 'ListOperations': grpc.unary_unary_rpc_method_handler( + servicer.ListOperations, + request_deserializer=ListOperationsRequest.FromString, + response_serializer=ListOperationsResponse.SerializeToString, + ), + 'CancelOperation': grpc.unary_unary_rpc_method_handler( + servicer.CancelOperation, + request_deserializer=CancelOperationRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + 'DeleteOperation': grpc.unary_unary_rpc_method_handler( + servicer.DeleteOperation, + request_deserializer=DeleteOperationRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'google.longrunning.Operations', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + class BetaOperationsServicer(object): - """""" - __metaclass__ = abc.ABCMeta - @abc.abstractmethod + """Manages long-running operations with an API service. + + When an API method normally takes long time to complete, it can be designed + to return [Operation][google.longrunning.Operation] to the client, and the client can use this + interface to receive the real response asynchronously by polling the + operation resource, or using `google.watcher.v1.Watcher` interface to watch + the response, or pass the operation resource to another API (such as Google + Cloud Pub/Sub API) to receive the response. Any API service that returns + long-running operations should implement the `Operations` interface so + developers can have a consistent client experience. + """ def GetOperation(self, request, context): - raise NotImplementedError() - @abc.abstractmethod + """Gets the latest state of a long-running operation. Clients may use this + method to poll the operation result at intervals as recommended by the API + service. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def ListOperations(self, request, context): - raise NotImplementedError() - @abc.abstractmethod + """Lists operations that match the specified filter in the request. 
If the + server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def CancelOperation(self, request, context): - raise NotImplementedError() - @abc.abstractmethod + """Starts asynchronous cancellation on a long-running operation. The server + makes a best effort to cancel the operation, but success is not + guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. Clients may use + [Operations.GetOperation] or other methods to check whether the + cancellation succeeded or the operation completed despite cancellation. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) def DeleteOperation(self, request, context): - raise NotImplementedError() + """Deletes a long-running operation. It indicates the client is no longer + interested in the operation result. It does not cancel the operation. + """ + context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) + class BetaOperationsStub(object): - """The interface to which stubs will conform.""" - __metaclass__ = abc.ABCMeta - @abc.abstractmethod - def GetOperation(self, request, timeout): + """Manages long-running operations with an API service. + + When an API method normally takes long time to complete, it can be designed + to return [Operation][google.longrunning.Operation] to the client, and the client can use this + interface to receive the real response asynchronously by polling the + operation resource, or using `google.watcher.v1.Watcher` interface to watch + the response, or pass the operation resource to another API (such as Google + Cloud Pub/Sub API) to receive the response. Any API service that returns + long-running operations should implement the `Operations` interface so + developers can have a consistent client experience. + """ + def GetOperation(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Gets the latest state of a long-running operation. Clients may use this + method to poll the operation result at intervals as recommended by the API + service. + """ raise NotImplementedError() GetOperation.future = None - @abc.abstractmethod - def ListOperations(self, request, timeout): + def ListOperations(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Lists operations that match the specified filter in the request. If the + server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + """ raise NotImplementedError() ListOperations.future = None - @abc.abstractmethod - def CancelOperation(self, request, timeout): + def CancelOperation(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Starts asynchronous cancellation on a long-running operation. The server + makes a best effort to cancel the operation, but success is not + guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. Clients may use + [Operations.GetOperation] or other methods to check whether the + cancellation succeeded or the operation completed despite cancellation. + """ raise NotImplementedError() CancelOperation.future = None - @abc.abstractmethod - def DeleteOperation(self, request, timeout): + def DeleteOperation(self, request, timeout, metadata=None, with_call=False, protocol_options=None): + """Deletes a long-running operation. It indicates the client is no longer + interested in the operation result. It does not cancel the operation. 
+ """ raise NotImplementedError() DeleteOperation.future = None + def beta_create_Operations_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - import google.longrunning.operations_pb2 - import google.longrunning.operations_pb2 - import google.longrunning.operations_pb2 - import google.longrunning.operations_pb2 - import google.longrunning.operations_pb2 - import google.protobuf.empty_pb2 - import google.longrunning.operations_pb2 - import google.protobuf.empty_pb2 request_deserializers = { - ('google.longrunning.Operations', 'CancelOperation'): google.longrunning.operations_pb2.CancelOperationRequest.FromString, - ('google.longrunning.Operations', 'DeleteOperation'): google.longrunning.operations_pb2.DeleteOperationRequest.FromString, - ('google.longrunning.Operations', 'GetOperation'): google.longrunning.operations_pb2.GetOperationRequest.FromString, - ('google.longrunning.Operations', 'ListOperations'): google.longrunning.operations_pb2.ListOperationsRequest.FromString, + ('google.longrunning.Operations', 'CancelOperation'): CancelOperationRequest.FromString, + ('google.longrunning.Operations', 'DeleteOperation'): DeleteOperationRequest.FromString, + ('google.longrunning.Operations', 'GetOperation'): GetOperationRequest.FromString, + ('google.longrunning.Operations', 'ListOperations'): ListOperationsRequest.FromString, } response_serializers = { - ('google.longrunning.Operations', 'CancelOperation'): google.protobuf.empty_pb2.Empty.SerializeToString, - ('google.longrunning.Operations', 'DeleteOperation'): google.protobuf.empty_pb2.Empty.SerializeToString, - ('google.longrunning.Operations', 'GetOperation'): google.longrunning.operations_pb2.Operation.SerializeToString, - ('google.longrunning.Operations', 'ListOperations'): google.longrunning.operations_pb2.ListOperationsResponse.SerializeToString, + ('google.longrunning.Operations', 'CancelOperation'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ('google.longrunning.Operations', 'DeleteOperation'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ('google.longrunning.Operations', 'GetOperation'): Operation.SerializeToString, + ('google.longrunning.Operations', 'ListOperations'): ListOperationsResponse.SerializeToString, } method_implementations = { ('google.longrunning.Operations', 'CancelOperation'): face_utilities.unary_unary_inline(servicer.CancelOperation), @@ -69,26 +240,19 @@ def beta_create_Operations_server(servicer, pool=None, pool_size=None, default_t server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) return beta_implementations.server(method_implementations, options=server_options) + def beta_create_Operations_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - import google.longrunning.operations_pb2 - import google.longrunning.operations_pb2 - import google.longrunning.operations_pb2 - import google.longrunning.operations_pb2 - import google.longrunning.operations_pb2 - import google.protobuf.empty_pb2 - import google.longrunning.operations_pb2 - import google.protobuf.empty_pb2 request_serializers = { - ('google.longrunning.Operations', 'CancelOperation'): google.longrunning.operations_pb2.CancelOperationRequest.SerializeToString, - ('google.longrunning.Operations', 'DeleteOperation'): 
google.longrunning.operations_pb2.DeleteOperationRequest.SerializeToString, - ('google.longrunning.Operations', 'GetOperation'): google.longrunning.operations_pb2.GetOperationRequest.SerializeToString, - ('google.longrunning.Operations', 'ListOperations'): google.longrunning.operations_pb2.ListOperationsRequest.SerializeToString, + ('google.longrunning.Operations', 'CancelOperation'): CancelOperationRequest.SerializeToString, + ('google.longrunning.Operations', 'DeleteOperation'): DeleteOperationRequest.SerializeToString, + ('google.longrunning.Operations', 'GetOperation'): GetOperationRequest.SerializeToString, + ('google.longrunning.Operations', 'ListOperations'): ListOperationsRequest.SerializeToString, } response_deserializers = { - ('google.longrunning.Operations', 'CancelOperation'): google.protobuf.empty_pb2.Empty.FromString, - ('google.longrunning.Operations', 'DeleteOperation'): google.protobuf.empty_pb2.Empty.FromString, - ('google.longrunning.Operations', 'GetOperation'): google.longrunning.operations_pb2.Operation.FromString, - ('google.longrunning.Operations', 'ListOperations'): google.longrunning.operations_pb2.ListOperationsResponse.FromString, + ('google.longrunning.Operations', 'CancelOperation'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ('google.longrunning.Operations', 'DeleteOperation'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ('google.longrunning.Operations', 'GetOperation'): Operation.FromString, + ('google.longrunning.Operations', 'ListOperations'): ListOperationsResponse.FromString, } cardinalities = { 'CancelOperation': cardinality.Cardinality.UNARY_UNARY, diff --git a/gcloud/bigtable/_generated_v2/table_pb2.py b/gcloud/bigtable/_generated/table_pb2.py similarity index 100% rename from gcloud/bigtable/_generated_v2/table_pb2.py rename to gcloud/bigtable/_generated/table_pb2.py diff --git a/gcloud/bigtable/_generated_v2/__init__.py b/gcloud/bigtable/_generated_v2/__init__.py deleted file mode 100644 index ad35adcf05ae..000000000000 --- a/gcloud/bigtable/_generated_v2/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Generated protobuf modules for Google Cloud Bigtable API.""" diff --git a/gcloud/bigtable/_generated_v2/_operations.proto b/gcloud/bigtable/_generated_v2/_operations.proto deleted file mode 100644 index a358d0a38787..000000000000 --- a/gcloud/bigtable/_generated_v2/_operations.proto +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.longrunning; - -import "google/api/annotations.proto"; -import "google/protobuf/any.proto"; -import "google/protobuf/empty.proto"; -import "google/rpc/status.proto"; - -option java_multiple_files = true; -option java_outer_classname = "OperationsProto"; -option java_package = "com.google.longrunning"; - - -// Manages long-running operations with an API service. -// -// When an API method normally takes long time to complete, it can be designed -// to return [Operation][google.longrunning.Operation] to the client, and the client can use this -// interface to receive the real response asynchronously by polling the -// operation resource, or using `google.watcher.v1.Watcher` interface to watch -// the response, or pass the operation resource to another API (such as Google -// Cloud Pub/Sub API) to receive the response. Any API service that returns -// long-running operations should implement the `Operations` interface so -// developers can have a consistent client experience. -service Operations { - // Gets the latest state of a long-running operation. Clients may use this - // method to poll the operation result at intervals as recommended by the API - // service. - rpc GetOperation(GetOperationRequest) returns (Operation) { - option (google.api.http) = { get: "/v1/{name=operations/**}" }; - } - - // Lists operations that match the specified filter in the request. If the - // server doesn't support this method, it returns - // `google.rpc.Code.UNIMPLEMENTED`. - rpc ListOperations(ListOperationsRequest) returns (ListOperationsResponse) { - option (google.api.http) = { get: "/v1/{name=operations}" }; - } - - // Starts asynchronous cancellation on a long-running operation. The server - // makes a best effort to cancel the operation, but success is not - // guaranteed. If the server doesn't support this method, it returns - // `google.rpc.Code.UNIMPLEMENTED`. Clients may use - // [Operations.GetOperation] or other methods to check whether the - // cancellation succeeded or the operation completed despite cancellation. - rpc CancelOperation(CancelOperationRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { post: "/v1/{name=operations/**}:cancel" body: "*" }; - } - - // Deletes a long-running operation. It indicates the client is no longer - // interested in the operation result. It does not cancel the operation. - rpc DeleteOperation(DeleteOperationRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { delete: "/v1/{name=operations/**}" }; - } -} - -// This resource represents a long-running operation that is the result of a -// network API call. -message Operation { - // The name of the operation resource, which is only unique within the same - // service that originally returns it. - string name = 1; - - // Some service-specific metadata associated with the operation. It typically - // contains progress information and common metadata such as create time. - // Some services may not provide such metadata. Any method that returns a - // long-running operation should document the metadata type, if any. - google.protobuf.Any metadata = 2; - - // If the value is false, it means the operation is still in progress. - // If true, the operation is completed and the `result` is available. - bool done = 3; - - oneof result { - // The error result of the operation in case of failure. 
- google.rpc.Status error = 4; - - // The normal response of the operation in case of success. If the original - // method returns no data on success, such as `Delete`, the response will be - // `google.protobuf.Empty`. If the original method is standard - // `Get`/`Create`/`Update`, the response should be the resource. For other - // methods, the response should have the type `XxxResponse`, where `Xxx` - // is the original method name. For example, if the original method name - // is `TakeSnapshot()`, the inferred response type will be - // `TakeSnapshotResponse`. - google.protobuf.Any response = 5; - } -} - -// The request message for [Operations.GetOperation][google.longrunning.Operations.GetOperation]. -message GetOperationRequest { - // The name of the operation resource. - string name = 1; -} - -// The request message for [Operations.ListOperations][google.longrunning.Operations.ListOperations]. -message ListOperationsRequest { - // The name of the operation collection. - string name = 4; - - // The standard List filter. - string filter = 1; - - // The standard List page size. - int32 page_size = 2; - - // The standard List page token. - string page_token = 3; -} - -// The response message for [Operations.ListOperations][google.longrunning.Operations.ListOperations]. -message ListOperationsResponse { - // A list of operations that match the specified filter in the request. - repeated Operation operations = 1; - - // The standard List next-page token. - string next_page_token = 2; -} - -// The request message for [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]. -message CancelOperationRequest { - // The name of the operation resource to be cancelled. - string name = 1; -} - -// The request message for [Operations.DeleteOperation][google.longrunning.Operations.DeleteOperation]. -message DeleteOperationRequest { - // The name of the operation resource to be deleted. - string name = 1; -} diff --git a/gcloud/bigtable/_generated_v2/operations_grpc_pb2.py b/gcloud/bigtable/_generated_v2/operations_grpc_pb2.py deleted file mode 100644 index 5723e1d99fe0..000000000000 --- a/gcloud/bigtable/_generated_v2/operations_grpc_pb2.py +++ /dev/null @@ -1,264 +0,0 @@ -from google.longrunning.operations_pb2 import ( - CancelOperationRequest, - DeleteOperationRequest, - GetOperationRequest, - ListOperationsRequest, - ListOperationsResponse, - Operation, - google_dot_protobuf_dot_empty__pb2, -) -from grpc.beta import implementations as beta_implementations -from grpc.beta import interfaces as beta_interfaces -from grpc.framework.common import cardinality -from grpc.framework.interfaces.face import utilities as face_utilities - - -class OperationsStub(object): - """Manages long-running operations with an API service. - - When an API method normally takes long time to complete, it can be designed - to return [Operation][google.longrunning.Operation] to the client, and the client can use this - interface to receive the real response asynchronously by polling the - operation resource, or using `google.watcher.v1.Watcher` interface to watch - the response, or pass the operation resource to another API (such as Google - Cloud Pub/Sub API) to receive the response. Any API service that returns - long-running operations should implement the `Operations` interface so - developers can have a consistent client experience. - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.GetOperation = channel.unary_unary( - '/google.longrunning.Operations/GetOperation', - request_serializer=GetOperationRequest.SerializeToString, - response_deserializer=Operation.FromString, - ) - self.ListOperations = channel.unary_unary( - '/google.longrunning.Operations/ListOperations', - request_serializer=ListOperationsRequest.SerializeToString, - response_deserializer=ListOperationsResponse.FromString, - ) - self.CancelOperation = channel.unary_unary( - '/google.longrunning.Operations/CancelOperation', - request_serializer=CancelOperationRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - self.DeleteOperation = channel.unary_unary( - '/google.longrunning.Operations/DeleteOperation', - request_serializer=DeleteOperationRequest.SerializeToString, - response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ) - - -class OperationsServicer(object): - """Manages long-running operations with an API service. - - When an API method normally takes long time to complete, it can be designed - to return [Operation][google.longrunning.Operation] to the client, and the client can use this - interface to receive the real response asynchronously by polling the - operation resource, or using `google.watcher.v1.Watcher` interface to watch - the response, or pass the operation resource to another API (such as Google - Cloud Pub/Sub API) to receive the response. Any API service that returns - long-running operations should implement the `Operations` interface so - developers can have a consistent client experience. - """ - - def GetOperation(self, request, context): - """Gets the latest state of a long-running operation. Clients may use this - method to poll the operation result at intervals as recommended by the API - service. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def ListOperations(self, request, context): - """Lists operations that match the specified filter in the request. If the - server doesn't support this method, it returns - `google.rpc.Code.UNIMPLEMENTED`. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def CancelOperation(self, request, context): - """Starts asynchronous cancellation on a long-running operation. The server - makes a best effort to cancel the operation, but success is not - guaranteed. If the server doesn't support this method, it returns - `google.rpc.Code.UNIMPLEMENTED`. Clients may use - [Operations.GetOperation] or other methods to check whether the - cancellation succeeded or the operation completed despite cancellation. - """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def DeleteOperation(self, request, context): - """Deletes a long-running operation. It indicates the client is no longer - interested in the operation result. It does not cancel the operation. 
- """ - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_OperationsServicer_to_server(servicer, server): - rpc_method_handlers = { - 'GetOperation': grpc.unary_unary_rpc_method_handler( - servicer.GetOperation, - request_deserializer=GetOperationRequest.FromString, - response_serializer=Operation.SerializeToString, - ), - 'ListOperations': grpc.unary_unary_rpc_method_handler( - servicer.ListOperations, - request_deserializer=ListOperationsRequest.FromString, - response_serializer=ListOperationsResponse.SerializeToString, - ), - 'CancelOperation': grpc.unary_unary_rpc_method_handler( - servicer.CancelOperation, - request_deserializer=CancelOperationRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - 'DeleteOperation': grpc.unary_unary_rpc_method_handler( - servicer.DeleteOperation, - request_deserializer=DeleteOperationRequest.FromString, - response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'google.longrunning.Operations', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - -class BetaOperationsServicer(object): - """Manages long-running operations with an API service. - - When an API method normally takes long time to complete, it can be designed - to return [Operation][google.longrunning.Operation] to the client, and the client can use this - interface to receive the real response asynchronously by polling the - operation resource, or using `google.watcher.v1.Watcher` interface to watch - the response, or pass the operation resource to another API (such as Google - Cloud Pub/Sub API) to receive the response. Any API service that returns - long-running operations should implement the `Operations` interface so - developers can have a consistent client experience. - """ - def GetOperation(self, request, context): - """Gets the latest state of a long-running operation. Clients may use this - method to poll the operation result at intervals as recommended by the API - service. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def ListOperations(self, request, context): - """Lists operations that match the specified filter in the request. If the - server doesn't support this method, it returns - `google.rpc.Code.UNIMPLEMENTED`. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def CancelOperation(self, request, context): - """Starts asynchronous cancellation on a long-running operation. The server - makes a best effort to cancel the operation, but success is not - guaranteed. If the server doesn't support this method, it returns - `google.rpc.Code.UNIMPLEMENTED`. Clients may use - [Operations.GetOperation] or other methods to check whether the - cancellation succeeded or the operation completed despite cancellation. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - def DeleteOperation(self, request, context): - """Deletes a long-running operation. It indicates the client is no longer - interested in the operation result. It does not cancel the operation. - """ - context.code(beta_interfaces.StatusCode.UNIMPLEMENTED) - - -class BetaOperationsStub(object): - """Manages long-running operations with an API service. 
- - When an API method normally takes long time to complete, it can be designed - to return [Operation][google.longrunning.Operation] to the client, and the client can use this - interface to receive the real response asynchronously by polling the - operation resource, or using `google.watcher.v1.Watcher` interface to watch - the response, or pass the operation resource to another API (such as Google - Cloud Pub/Sub API) to receive the response. Any API service that returns - long-running operations should implement the `Operations` interface so - developers can have a consistent client experience. - """ - def GetOperation(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Gets the latest state of a long-running operation. Clients may use this - method to poll the operation result at intervals as recommended by the API - service. - """ - raise NotImplementedError() - GetOperation.future = None - def ListOperations(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Lists operations that match the specified filter in the request. If the - server doesn't support this method, it returns - `google.rpc.Code.UNIMPLEMENTED`. - """ - raise NotImplementedError() - ListOperations.future = None - def CancelOperation(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Starts asynchronous cancellation on a long-running operation. The server - makes a best effort to cancel the operation, but success is not - guaranteed. If the server doesn't support this method, it returns - `google.rpc.Code.UNIMPLEMENTED`. Clients may use - [Operations.GetOperation] or other methods to check whether the - cancellation succeeded or the operation completed despite cancellation. - """ - raise NotImplementedError() - CancelOperation.future = None - def DeleteOperation(self, request, timeout, metadata=None, with_call=False, protocol_options=None): - """Deletes a long-running operation. It indicates the client is no longer - interested in the operation result. It does not cancel the operation. 
- """ - raise NotImplementedError() - DeleteOperation.future = None - - -def beta_create_Operations_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None): - request_deserializers = { - ('google.longrunning.Operations', 'CancelOperation'): CancelOperationRequest.FromString, - ('google.longrunning.Operations', 'DeleteOperation'): DeleteOperationRequest.FromString, - ('google.longrunning.Operations', 'GetOperation'): GetOperationRequest.FromString, - ('google.longrunning.Operations', 'ListOperations'): ListOperationsRequest.FromString, - } - response_serializers = { - ('google.longrunning.Operations', 'CancelOperation'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ('google.longrunning.Operations', 'DeleteOperation'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, - ('google.longrunning.Operations', 'GetOperation'): Operation.SerializeToString, - ('google.longrunning.Operations', 'ListOperations'): ListOperationsResponse.SerializeToString, - } - method_implementations = { - ('google.longrunning.Operations', 'CancelOperation'): face_utilities.unary_unary_inline(servicer.CancelOperation), - ('google.longrunning.Operations', 'DeleteOperation'): face_utilities.unary_unary_inline(servicer.DeleteOperation), - ('google.longrunning.Operations', 'GetOperation'): face_utilities.unary_unary_inline(servicer.GetOperation), - ('google.longrunning.Operations', 'ListOperations'): face_utilities.unary_unary_inline(servicer.ListOperations), - } - server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout) - return beta_implementations.server(method_implementations, options=server_options) - - -def beta_create_Operations_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None): - request_serializers = { - ('google.longrunning.Operations', 'CancelOperation'): CancelOperationRequest.SerializeToString, - ('google.longrunning.Operations', 'DeleteOperation'): DeleteOperationRequest.SerializeToString, - ('google.longrunning.Operations', 'GetOperation'): GetOperationRequest.SerializeToString, - ('google.longrunning.Operations', 'ListOperations'): ListOperationsRequest.SerializeToString, - } - response_deserializers = { - ('google.longrunning.Operations', 'CancelOperation'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ('google.longrunning.Operations', 'DeleteOperation'): google_dot_protobuf_dot_empty__pb2.Empty.FromString, - ('google.longrunning.Operations', 'GetOperation'): Operation.FromString, - ('google.longrunning.Operations', 'ListOperations'): ListOperationsResponse.FromString, - } - cardinalities = { - 'CancelOperation': cardinality.Cardinality.UNARY_UNARY, - 'DeleteOperation': cardinality.Cardinality.UNARY_UNARY, - 'GetOperation': cardinality.Cardinality.UNARY_UNARY, - 'ListOperations': cardinality.Cardinality.UNARY_UNARY, - } - stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size) - return beta_implementations.dynamic_stub(channel, 'google.longrunning.Operations', cardinalities, options=stub_options) diff --git a/gcloud/bigtable/client.py b/gcloud/bigtable/client.py index 35ca65ce212a..38ecf51ecbc8 100644 --- a/gcloud/bigtable/client.py +++ 
b/gcloud/bigtable/client.py @@ -31,16 +31,16 @@ from grpc.beta import implementations -from gcloud.bigtable._generated_v2 import ( +from gcloud.bigtable._generated import ( bigtable_instance_admin_pb2 as instance_admin_v2_pb2) # V1 table admin service -from gcloud.bigtable._generated_v2 import ( +from gcloud.bigtable._generated import ( bigtable_table_admin_pb2 as table_admin_v2_pb2) # V1 data service -from gcloud.bigtable._generated_v2 import ( +from gcloud.bigtable._generated import ( bigtable_pb2 as data_v2_pb2) -from gcloud.bigtable._generated_v2 import ( +from gcloud.bigtable._generated import ( operations_grpc_pb2 as operations_grpc_v2_pb2) from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES diff --git a/gcloud/bigtable/cluster.py b/gcloud/bigtable/cluster.py index 7867cff82bc7..993f622afdd0 100644 --- a/gcloud/bigtable/cluster.py +++ b/gcloud/bigtable/cluster.py @@ -19,9 +19,9 @@ from google.longrunning import operations_pb2 -from gcloud.bigtable._generated_v2 import ( +from gcloud.bigtable._generated import ( instance_pb2 as data_v2_pb2) -from gcloud.bigtable._generated_v2 import ( +from gcloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) @@ -288,7 +288,7 @@ def __ne__(self, other): def reload(self): """Reload the metadata for this cluster.""" request_pb = messages_v2_pb2.GetClusterRequest(name=self.name) - # We expect a `._generated_v2.instance_pb2.Cluster`. + # We expect a `._generated.instance_pb2.Cluster`. cluster_pb = self._instance._client._instance_stub.GetCluster( request_pb, self._instance._client.timeout_seconds) @@ -346,7 +346,7 @@ def update(self): name=self.name, serve_nodes=self.serve_nodes, ) - # Ignore expected `._generated_v2.instance_pb2.Cluster`. + # Ignore expected `._generated.instance_pb2.Cluster`. 
operation_pb = self._instance._client._instance_stub.UpdateCluster( request_pb, self._instance._client.timeout_seconds) diff --git a/gcloud/bigtable/column_family.py b/gcloud/bigtable/column_family.py index 5c33e6f456c3..8aeb0ca8a0fa 100644 --- a/gcloud/bigtable/column_family.py +++ b/gcloud/bigtable/column_family.py @@ -20,9 +20,9 @@ from google.protobuf import duration_pb2 from gcloud._helpers import _total_seconds -from gcloud.bigtable._generated_v2 import ( +from gcloud.bigtable._generated import ( table_pb2 as table_v2_pb2) -from gcloud.bigtable._generated_v2 import ( +from gcloud.bigtable._generated import ( bigtable_table_admin_pb2 as table_admin_v2_pb2) diff --git a/gcloud/bigtable/instance.py b/gcloud/bigtable/instance.py index dec6c9029744..e83c4eb8c698 100644 --- a/gcloud/bigtable/instance.py +++ b/gcloud/bigtable/instance.py @@ -20,11 +20,11 @@ from google.longrunning import operations_pb2 from gcloud._helpers import _pb_timestamp_to_datetime -from gcloud.bigtable._generated_v2 import ( +from gcloud.bigtable._generated import ( instance_pb2 as data_v2_pb2) -from gcloud.bigtable._generated_v2 import ( +from gcloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) -from gcloud.bigtable._generated_v2 import ( +from gcloud.bigtable._generated import ( bigtable_table_admin_pb2 as table_messages_v2_pb2) from gcloud.bigtable.cluster import Cluster from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES diff --git a/gcloud/bigtable/row.py b/gcloud/bigtable/row.py index aad7dbecad0e..f04df29d381b 100644 --- a/gcloud/bigtable/row.py +++ b/gcloud/bigtable/row.py @@ -22,9 +22,9 @@ from gcloud._helpers import _datetime_from_microseconds from gcloud._helpers import _microseconds_from_datetime from gcloud._helpers import _to_bytes -from gcloud.bigtable._generated_v2 import ( +from gcloud.bigtable._generated import ( data_pb2 as data_v2_pb2) -from gcloud.bigtable._generated_v2 import ( +from gcloud.bigtable._generated import ( bigtable_pb2 as messages_v2_pb2) @@ -854,7 +854,7 @@ def _parse_rmw_row_response(row_response): def _parse_family_pb(family_pb): """Parses a Family protobuf into a dictionary. - :type family_pb: :class:`._generated_v2.data_pb2.Family` + :type family_pb: :class:`._generated.data_pb2.Family` :param family_pb: A protobuf :rtype: tuple diff --git a/gcloud/bigtable/row_data.py b/gcloud/bigtable/row_data.py index 3f4490097e68..55f42e5be57f 100644 --- a/gcloud/bigtable/row_data.py +++ b/gcloud/bigtable/row_data.py @@ -44,7 +44,7 @@ def __init__(self, value, timestamp, labels=()): def from_pb(cls, cell_pb): """Create a new cell from a Cell protobuf. - :type cell_pb: :class:`._generated_v2.data_pb2.Cell` + :type cell_pb: :class:`._generated.data_pb2.Cell` :param cell_pb: The protobuf to convert. 
:rtype: :class:`Cell` diff --git a/gcloud/bigtable/row_filters.py b/gcloud/bigtable/row_filters.py index f76615ba5ea8..07afb45d3ed4 100644 --- a/gcloud/bigtable/row_filters.py +++ b/gcloud/bigtable/row_filters.py @@ -17,7 +17,7 @@ from gcloud._helpers import _microseconds_from_datetime from gcloud._helpers import _to_bytes -from gcloud.bigtable._generated_v2 import ( +from gcloud.bigtable._generated import ( data_pb2 as data_v2_pb2) diff --git a/gcloud/bigtable/table.py b/gcloud/bigtable/table.py index 5bcead18e906..3052643ba032 100644 --- a/gcloud/bigtable/table.py +++ b/gcloud/bigtable/table.py @@ -15,11 +15,11 @@ """User friendly container for Google Cloud Bigtable Table.""" from gcloud._helpers import _to_bytes -from gcloud.bigtable._generated_v2 import ( +from gcloud.bigtable._generated import ( bigtable_pb2 as data_messages_v2_pb2) -from gcloud.bigtable._generated_v2 import ( +from gcloud.bigtable._generated import ( bigtable_table_admin_pb2 as table_admin_messages_v2_pb2) -from gcloud.bigtable._generated_v2 import ( +from gcloud.bigtable._generated import ( table_pb2 as table_v2_pb2) from gcloud.bigtable.column_family import _gc_rule_from_pb from gcloud.bigtable.column_family import ColumnFamily @@ -139,7 +139,7 @@ def create(self, initial_split_keys=None, column_families=()): .. note:: A create request returns a - :class:`._generated_v2.table_pb2.Table` but we don't use + :class:`._generated.table_pb2.Table` but we don't use this response. :type initial_split_keys: list @@ -174,7 +174,7 @@ def create(self, initial_split_keys=None, column_families=()): table=table_pb, ) client = self._instance._client - # We expect a `._generated_v2.table_pb2.Table` + # We expect a `._generated.table_pb2.Table` client._table_stub.CreateTable(request_pb, client.timeout_seconds) def delete(self): @@ -199,7 +199,7 @@ def list_column_families(self): request_pb = table_admin_messages_v2_pb2.GetTableRequest( name=self.name) client = self._instance._client - # We expect a `._generated_v2.table_pb2.Table` + # We expect a `._generated.table_pb2.Table` table_pb = client._table_stub.GetTable(request_pb, client.timeout_seconds) diff --git a/gcloud/bigtable/test_client.py b/gcloud/bigtable/test_client.py index 435798ecdf61..5340fab914eb 100644 --- a/gcloud/bigtable/test_client.py +++ b/gcloud/bigtable/test_client.py @@ -570,9 +570,9 @@ def test_instance_factory_w_explicit_serve_nodes(self): self.assertTrue(instance._client is client) def test_list_instances(self): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( instance_pb2 as data_v2_pb2) - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) from gcloud.bigtable._testing import _FakeStub diff --git a/gcloud/bigtable/test_cluster.py b/gcloud/bigtable/test_cluster.py index 4f8da614f439..925f7d425566 100644 --- a/gcloud/bigtable/test_cluster.py +++ b/gcloud/bigtable/test_cluster.py @@ -588,25 +588,25 @@ def test_op_name_parsing_failure(self): def _CellPB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( data_pb2 as data_v2_pb2) return data_v2_pb2.Cell(*args, **kw) def _ClusterPB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( instance_pb2 as instance_v2_pb2) return instance_v2_pb2.Cluster(*args, **kw) def _DeleteClusterRequestPB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( 
bigtable_instance_admin_pb2 as messages_v2_pb2) return messages_v2_pb2.DeleteClusterRequest(*args, **kw) def _GetClusterRequestPB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) return messages_v2_pb2.GetClusterRequest(*args, **kw) diff --git a/gcloud/bigtable/test_column_family.py b/gcloud/bigtable/test_column_family.py index 64fe8a46c78a..8155a1637975 100644 --- a/gcloud/bigtable/test_column_family.py +++ b/gcloud/bigtable/test_column_family.py @@ -405,7 +405,7 @@ def test_to_pb_with_rule(self): self.assertEqual(pb_val, expected) def _create_test_helper(self, gc_rule=None): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( bigtable_table_admin_pb2 as table_admin_v2_pb2) from gcloud.bigtable._testing import _FakeStub @@ -465,7 +465,7 @@ def test_create_with_gc_rule(self): def _update_test_helper(self, gc_rule=None): from gcloud.bigtable._testing import _FakeStub - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( bigtable_table_admin_pb2 as table_admin_v2_pb2) project_id = 'project-id' @@ -524,7 +524,7 @@ def test_update_with_gc_rule(self): def test_delete(self): from google.protobuf import empty_pb2 - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( bigtable_table_admin_pb2 as table_admin_v2_pb2) from gcloud.bigtable._testing import _FakeStub @@ -643,25 +643,25 @@ def WhichOneof(cls, name): def _GcRulePB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( table_pb2 as table_v2_pb2) return table_v2_pb2.GcRule(*args, **kw) def _GcRuleIntersectionPB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( table_pb2 as table_v2_pb2) return table_v2_pb2.GcRule.Intersection(*args, **kw) def _GcRuleUnionPB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( table_pb2 as table_v2_pb2) return table_v2_pb2.GcRule.Union(*args, **kw) def _ColumnFamilyPB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( table_pb2 as table_v2_pb2) return table_v2_pb2.ColumnFamily(*args, **kw) diff --git a/gcloud/bigtable/test_instance.py b/gcloud/bigtable/test_instance.py index da8827685292..dca15a6276a2 100644 --- a/gcloud/bigtable/test_instance.py +++ b/gcloud/bigtable/test_instance.py @@ -209,7 +209,7 @@ def test_table_factory(self): self.assertEqual(table._instance, instance) def test__update_from_pb_success(self): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( instance_pb2 as data_v2_pb2) display_name = 'display_name' @@ -223,7 +223,7 @@ def test__update_from_pb_success(self): self.assertEqual(instance.display_name, display_name) def test__update_from_pb_no_display_name(self): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( instance_pb2 as data_v2_pb2) instance_pb = data_v2_pb2.Instance() @@ -235,7 +235,7 @@ def test__update_from_pb_no_display_name(self): def test_from_pb_success(self): from gcloud.bigtable.instance import _EXISTING_INSTANCE_LOCATION_ID - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( instance_pb2 as data_v2_pb2) client = _Client(project=self.PROJECT) @@ -254,7 +254,7 @@ def test_from_pb_success(self): _EXISTING_INSTANCE_LOCATION_ID) def 
test_from_pb_bad_instance_name(self): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( instance_pb2 as data_v2_pb2) instance_name = 'INCORRECT_FORMAT' @@ -265,7 +265,7 @@ def test_from_pb_bad_instance_name(self): klass.from_pb(instance_pb, None) def test_from_pb_project_mistmatch(self): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( instance_pb2 as data_v2_pb2) ALT_PROJECT = 'ALT_PROJECT' @@ -310,9 +310,9 @@ def test___ne__(self): self.assertNotEqual(instance1, instance2) def test_reload(self): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( instance_pb2 as data_v2_pb2) - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb) from gcloud.bigtable._testing import _FakeStub @@ -458,7 +458,7 @@ def mock_process_operation(operation_pb): self.assertEqual(process_operation_called, [response_pb]) def test_update(self): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( instance_pb2 as data_v2_pb2) from gcloud.bigtable._testing import _FakeStub @@ -493,7 +493,7 @@ def test_update(self): def test_delete(self): from google.protobuf import empty_pb2 - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb) from gcloud.bigtable._testing import _FakeStub @@ -524,9 +524,9 @@ def test_delete(self): )]) def test_list_clusters(self): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( instance_pb2 as instance_v2_pb2) - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb2) from gcloud.bigtable._testing import _FakeStub @@ -581,9 +581,9 @@ def test_list_clusters(self): )]) def _list_tables_helper(self, table_name=None): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( table_pb2 as table_data_v2_pb2) - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( bigtable_table_admin_pb2 as table_messages_v1_pb2) from gcloud.bigtable._testing import _FakeStub @@ -652,9 +652,9 @@ def _callFUT(self, instance, **kw): def test_w_defaults(self): from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( instance_pb2 as data_v2_pb2) - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb) from gcloud.bigtable.instance import Instance @@ -678,9 +678,9 @@ def test_w_defaults(self): self.assertEqual(cluster.serve_nodes, DEFAULT_SERVE_NODES) def test_w_explicit_serve_nodes(self): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( instance_pb2 as data_v2_pb2) - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb) from gcloud.bigtable.instance import Instance DISPLAY_NAME = u'DISPLAY_NAME' @@ -715,7 +715,7 @@ def _callFUT(self, any_val, expected_type=None): def test_with_known_type_url(self): from google.protobuf import any_pb2 from gcloud._testing import _Monkey - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( data_pb2 as data_v2_pb2) from gcloud.bigtable import instance as MUT @@ -738,9 +738,9 @@ def 
test_with_known_type_url(self): def test_with_create_instance_metadata(self): from google.protobuf import any_pb2 from google.protobuf.timestamp_pb2 import Timestamp - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( instance_pb2 as data_v2_pb2) - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb) TYPE_URL = ('type.googleapis.com/' + @@ -798,7 +798,7 @@ def _callFUT(self, operation_pb): def test_it(self): from google.longrunning import operations_pb2 from gcloud._testing import _Monkey - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( bigtable_instance_admin_pb2 as messages_v2_pb) from gcloud.bigtable import instance as MUT diff --git a/gcloud/bigtable/test_row.py b/gcloud/bigtable/test_row.py index ff18945acafb..7589b16ce9cf 100644 --- a/gcloud/bigtable/test_row.py +++ b/gcloud/bigtable/test_row.py @@ -805,91 +805,91 @@ def test_it(self): def _CheckAndMutateRowRequestPB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( bigtable_pb2 as messages_v2_pb2) return messages_v2_pb2.CheckAndMutateRowRequest(*args, **kw) def _CheckAndMutateRowResponsePB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( bigtable_pb2 as messages_v2_pb2) return messages_v2_pb2.CheckAndMutateRowResponse(*args, **kw) def _MutateRowRequestPB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( bigtable_pb2 as messages_v2_pb2) return messages_v2_pb2.MutateRowRequest(*args, **kw) def _ReadModifyWriteRowRequestPB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( bigtable_pb2 as messages_v2_pb2) return messages_v2_pb2.ReadModifyWriteRowRequest(*args, **kw) def _ReadModifyWriteRowResponsePB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( bigtable_pb2 as messages_v2_pb2) return messages_v2_pb2.ReadModifyWriteRowResponse(*args, **kw) def _CellPB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( data_pb2 as data_v2_pb2) return data_v2_pb2.Cell(*args, **kw) def _ColumnPB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( data_pb2 as data_v2_pb2) return data_v2_pb2.Column(*args, **kw) def _FamilyPB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( data_pb2 as data_v2_pb2) return data_v2_pb2.Family(*args, **kw) def _MutationPB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( data_pb2 as data_v2_pb2) return data_v2_pb2.Mutation(*args, **kw) def _MutationSetCellPB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( data_pb2 as data_v2_pb2) return data_v2_pb2.Mutation.SetCell(*args, **kw) def _MutationDeleteFromColumnPB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( data_pb2 as data_v2_pb2) return data_v2_pb2.Mutation.DeleteFromColumn(*args, **kw) def _MutationDeleteFromFamilyPB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( data_pb2 as data_v2_pb2) return data_v2_pb2.Mutation.DeleteFromFamily(*args, **kw) def _MutationDeleteFromRowPB(*args, **kw): - 
from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( data_pb2 as data_v2_pb2) return data_v2_pb2.Mutation.DeleteFromRow(*args, **kw) def _RowPB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( data_pb2 as data_v2_pb2) return data_v2_pb2.Row(*args, **kw) def _ReadModifyWriteRulePB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( data_pb2 as data_v2_pb2) return data_v2_pb2.ReadModifyWriteRule(*args, **kw) diff --git a/gcloud/bigtable/test_row_data.py b/gcloud/bigtable/test_row_data.py index 2162212e7fdd..8463ec89b9f3 100644 --- a/gcloud/bigtable/test_row_data.py +++ b/gcloud/bigtable/test_row_data.py @@ -28,7 +28,7 @@ def _makeOne(self, *args, **kwargs): def _from_pb_test_helper(self, labels=None): import datetime from gcloud._helpers import _EPOCH - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( data_pb2 as data_v2_pb2) timestamp_micros = 18738724000 # Make sure millis granularity @@ -698,7 +698,7 @@ def __init__(self, chunks, last_scanned_row_key=''): def _generate_cell_chunks(chunk_text_pbs): from google.protobuf.text_format import Merge - from gcloud.bigtable._generated_v2.bigtable_pb2 import ReadRowsResponse + from gcloud.bigtable._generated.bigtable_pb2 import ReadRowsResponse chunks = [] diff --git a/gcloud/bigtable/test_row_filters.py b/gcloud/bigtable/test_row_filters.py index 594a4fe47c2b..9c0abe24f47d 100644 --- a/gcloud/bigtable/test_row_filters.py +++ b/gcloud/bigtable/test_row_filters.py @@ -960,42 +960,42 @@ def test_to_pb_false_only(self): def _ColumnRangePB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( data_pb2 as data_v2_pb2) return data_v2_pb2.ColumnRange(*args, **kw) def _RowFilterPB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( data_pb2 as data_v2_pb2) return data_v2_pb2.RowFilter(*args, **kw) def _RowFilterChainPB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( data_pb2 as data_v2_pb2) return data_v2_pb2.RowFilter.Chain(*args, **kw) def _RowFilterConditionPB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( data_pb2 as data_v2_pb2) return data_v2_pb2.RowFilter.Condition(*args, **kw) def _RowFilterInterleavePB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( data_pb2 as data_v2_pb2) return data_v2_pb2.RowFilter.Interleave(*args, **kw) def _TimestampRangePB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( data_pb2 as data_v2_pb2) return data_v2_pb2.TimestampRange(*args, **kw) def _ValueRangePB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( data_pb2 as data_v2_pb2) return data_v2_pb2.ValueRange(*args, **kw) diff --git a/gcloud/bigtable/test_table.py b/gcloud/bigtable/test_table.py index 7824291c22d2..c9b4e2240e8c 100644 --- a/gcloud/bigtable/test_table.py +++ b/gcloud/bigtable/test_table.py @@ -507,37 +507,37 @@ def test_with_limit(self): def _CreateTableRequestPB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( bigtable_table_admin_pb2 as table_admin_v2_pb2) return table_admin_v2_pb2.CreateTableRequest(*args, **kw) def _CreateTableRequestSplitPB(*args, **kw): - 
from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( bigtable_table_admin_pb2 as table_admin_v2_pb2) return table_admin_v2_pb2.CreateTableRequest.Split(*args, **kw) def _DeleteTableRequestPB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( bigtable_table_admin_pb2 as table_admin_v2_pb2) return table_admin_v2_pb2.DeleteTableRequest(*args, **kw) def _GetTableRequestPB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( bigtable_table_admin_pb2 as table_admin_v2_pb2) return table_admin_v2_pb2.GetTableRequest(*args, **kw) def _ReadRowsRequestPB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( bigtable_pb2 as messages_v2_pb2) return messages_v2_pb2.ReadRowsRequest(*args, **kw) def _ReadRowsResponseCellChunkPB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( bigtable_pb2 as messages_v2_pb2) family_name = kw.pop('family_name') qualifier = kw.pop('qualifier') @@ -548,25 +548,25 @@ def _ReadRowsResponseCellChunkPB(*args, **kw): def _ReadRowsResponsePB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( bigtable_pb2 as messages_v2_pb2) return messages_v2_pb2.ReadRowsResponse(*args, **kw) def _SampleRowKeysRequestPB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( bigtable_pb2 as messages_v2_pb2) return messages_v2_pb2.SampleRowKeysRequest(*args, **kw) def _TablePB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( table_pb2 as table_v2_pb2) return table_v2_pb2.Table(*args, **kw) def _ColumnFamilyPB(*args, **kw): - from gcloud.bigtable._generated_v2 import ( + from gcloud.bigtable._generated import ( table_pb2 as table_v2_pb2) return table_v2_pb2.ColumnFamily(*args, **kw) diff --git a/scripts/check_generate.py b/scripts/check_generate.py deleted file mode 100644 index a126cc659f60..000000000000 --- a/scripts/check_generate.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2015 Google Inc. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Checking that protobuf generated modules import correctly.""" - -from __future__ import print_function - -import glob -import os - - -def main(): - """Import all PB2 files.""" - print('>>> import gcloud.bigtable._generated') - _ = __import__('gcloud.bigtable._generated') - pb2_files = sorted(glob.glob('gcloud/bigtable/_generated/*pb2.py')) - for filename in pb2_files: - basename = os.path.basename(filename) - module_name, _ = os.path.splitext(basename) - - print('>>> from gcloud.bigtable._generated import ' + module_name) - _ = __import__('gcloud.bigtable._generated', fromlist=[module_name]) - - -if __name__ == '__main__': - main() diff --git a/scripts/rewrite_imports.py b/scripts/rewrite_imports.py index d6523d4d5410..75606d776f26 100644 --- a/scripts/rewrite_imports.py +++ b/scripts/rewrite_imports.py @@ -24,13 +24,9 @@ IMPORT_TEMPLATE = 'import %s' IMPORT_FROM_TEMPLATE = 'from %s import ' REPLACEMENTS = { - # Bigtable v1 - 'google.bigtable.admin.cluster.v1': 'gcloud.bigtable._generated', - 'google.bigtable.admin.table.v1': 'gcloud.bigtable._generated', - 'google.bigtable.v1': 'gcloud.bigtable._generated', - # Bigtble v2 - 'google.bigtable.v2': 'gcloud.bigtable._generated_v2', - 'google.bigtable.admin.v2': 'gcloud.bigtable._generated_v2', + # Bigtable v2 + 'google.bigtable.v2': 'gcloud.bigtable._generated', + 'google.bigtable.admin.v2': 'gcloud.bigtable._generated', # Datastore v1beta3 'google.datastore.v1beta3': 'gcloud.datastore._generated', } diff --git a/scripts/run_pylint.py b/scripts/run_pylint.py index 11b36f90571b..ac04989943c1 100644 --- a/scripts/run_pylint.py +++ b/scripts/run_pylint.py @@ -32,7 +32,6 @@ IGNORED_DIRECTORIES = [ os.path.join('gcloud', 'bigtable', '_generated'), - os.path.join('gcloud', 'bigtable', '_generated_v2'), os.path.join('gcloud', 'datastore', '_generated'), ] IGNORED_FILES = [