From 8c124a2fb4d73808b8e0f9267d5422658807a9d2 Mon Sep 17 00:00:00 2001 From: Jeff Ching Date: Sun, 26 Jan 2020 11:34:34 -0800 Subject: [PATCH] feat: add v1beta2, v1alpha2 clients (#44) * feat: add v1beta2 protos * update pom.xml files for new modules * fix formatting * feat: add v1alpha2 client --- .gitignore | 16 +- google-cloud-bigquerystorage-bom/pom.xml | 27 +- google-cloud-bigquerystorage/pom.xml | 18 + .../storage/v1alpha2/BigQueryWriteClient.java | 389 + .../v1alpha2/BigQueryWriteSettings.java | 233 + .../storage/v1alpha2/package-info.java | 42 + .../v1alpha2/stub/BigQueryWriteStub.java | 67 + .../stub/BigQueryWriteStubSettings.java | 381 + .../GrpcBigQueryWriteCallableFactory.java | 115 + .../v1alpha2/stub/GrpcBigQueryWriteStub.java | 292 + .../v1beta2/BaseBigQueryReadClient.java | 388 + .../v1beta2/BaseBigQueryReadSettings.java | 201 + .../storage/v1beta2/package-info.java | 44 + .../v1beta2/stub/BigQueryReadStub.java | 54 + .../stub/BigQueryReadStubSettings.java | 353 + .../stub/GrpcBigQueryReadCallableFactory.java | 115 + .../v1beta2/stub/GrpcBigQueryReadStub.java | 225 + .../v1alpha2/BigQueryWriteClientTest.java | 129 + .../storage/v1alpha2/MockBigQueryWrite.java | 57 + .../v1alpha2/MockBigQueryWriteImpl.java | 159 + .../v1beta2/BaseBigQueryReadClientTest.java | 166 + .../storage/v1beta2/MockBigQueryRead.java | 57 + .../storage/v1beta2/MockBigQueryReadImpl.java | 102 + .../pom.xml | 56 + .../storage/v1alpha2/BigQueryWriteGrpc.java | 900 ++ .../pom.xml | 56 + .../storage/v1beta2/BigQueryReadGrpc.java | 703 ++ pom.xml | 24 + .../pom.xml | 25 + .../storage/v1alpha2/ProtoBufProto.java | 1702 ++++ .../bigquery/storage/v1alpha2/Storage.java | 8598 +++++++++++++++++ .../bigquery/storage/v1alpha2/Stream.java | 2361 +++++ .../bigquery/storage/v1alpha2/Table.java | 3527 +++++++ .../bigquery/storage/v1alpha2/protobuf.proto | 44 + .../bigquery/storage/v1alpha2/storage.proto | 191 + .../bigquery/storage/v1alpha2/stream.proto | 64 + .../bigquery/storage/v1alpha2/table.proto | 101 + .../pom.xml | 25 + .../bigquery/storage/v1beta2/ArrowProto.java | 79 + .../storage/v1beta2/ArrowRecordBatch.java | 551 ++ .../v1beta2/ArrowRecordBatchOrBuilder.java | 38 + .../bigquery/storage/v1beta2/ArrowSchema.java | 558 ++ .../storage/v1beta2/ArrowSchemaOrBuilder.java | 38 + .../bigquery/storage/v1beta2/AvroProto.java | 78 + .../bigquery/storage/v1beta2/AvroRows.java | 550 ++ .../storage/v1beta2/AvroRowsOrBuilder.java | 38 + .../bigquery/storage/v1beta2/AvroSchema.java | 641 ++ .../storage/v1beta2/AvroSchemaOrBuilder.java | 52 + .../v1beta2/CreateReadSessionRequest.java | 1080 +++ .../CreateReadSessionRequestOrBuilder.java | 116 + .../bigquery/storage/v1beta2/DataFormat.java | 162 + .../storage/v1beta2/ReadRowsRequest.java | 731 ++ .../v1beta2/ReadRowsRequestOrBuilder.java | 65 + .../storage/v1beta2/ReadRowsResponse.java | 1798 ++++ .../v1beta2/ReadRowsResponseOrBuilder.java | 184 + .../bigquery/storage/v1beta2/ReadSession.java | 5203 ++++++++++ .../storage/v1beta2/ReadSessionOrBuilder.java | 410 + .../bigquery/storage/v1beta2/ReadStream.java | 645 ++ .../storage/v1beta2/ReadStreamOrBuilder.java | 52 + .../v1beta2/SplitReadStreamRequest.java | 772 ++ .../SplitReadStreamRequestOrBuilder.java | 73 + .../v1beta2/SplitReadStreamResponse.java | 1059 ++ .../SplitReadStreamResponseOrBuilder.java | 104 + .../storage/v1beta2/StorageProto.java | 233 + .../bigquery/storage/v1beta2/StreamProto.java | 163 + .../bigquery/storage/v1beta2/StreamStats.java | 1438 +++ .../storage/v1beta2/StreamStatsOrBuilder.java | 60 + 
.../storage/v1beta2/ThrottleState.java | 549 ++ .../v1beta2/ThrottleStateOrBuilder.java | 39 + .../bigquery/storage/v1beta2/arrow.proto | 40 + .../cloud/bigquery/storage/v1beta2/avro.proto | 36 + .../bigquery/storage/v1beta2/storage.proto | 227 + .../bigquery/storage/v1beta2/stream.proto | 135 + synth.metadata | 281 +- synth.py | 79 +- versions.txt | 2 + 76 files changed, 40167 insertions(+), 199 deletions(-) create mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClient.java create mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteSettings.java create mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/package-info.java create mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/BigQueryWriteStub.java create mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/BigQueryWriteStubSettings.java create mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/GrpcBigQueryWriteCallableFactory.java create mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/GrpcBigQueryWriteStub.java create mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadClient.java create mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadSettings.java create mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/package-info.java create mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryReadStub.java create mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryReadStubSettings.java create mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryReadCallableFactory.java create mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryReadStub.java create mode 100644 google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClientTest.java create mode 100644 google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/MockBigQueryWrite.java create mode 100644 google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/MockBigQueryWriteImpl.java create mode 100644 google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadClientTest.java create mode 100644 google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryRead.java create mode 100644 google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryReadImpl.java create mode 100644 grpc-google-cloud-bigquerystorage-v1alpha2/pom.xml create mode 100644 grpc-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteGrpc.java create mode 100644 grpc-google-cloud-bigquerystorage-v1beta2/pom.xml create mode 100644 grpc-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadGrpc.java create mode 100644 
proto-google-cloud-bigquerystorage-v1alpha2/pom.xml create mode 100644 proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/ProtoBufProto.java create mode 100644 proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Storage.java create mode 100644 proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Stream.java create mode 100644 proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Table.java create mode 100644 proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/protobuf.proto create mode 100644 proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/storage.proto create mode 100644 proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/stream.proto create mode 100644 proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/table.proto create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/pom.xml create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowProto.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowRecordBatch.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowRecordBatchOrBuilder.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSchema.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSchemaOrBuilder.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroProto.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroRows.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroRowsOrBuilder.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroSchema.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroSchemaOrBuilder.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateReadSessionRequest.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateReadSessionRequestOrBuilder.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/DataFormat.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsRequest.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsRequestOrBuilder.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponse.java create mode 100644 
proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponseOrBuilder.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadSession.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadSessionOrBuilder.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStream.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStreamOrBuilder.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamRequest.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamRequestOrBuilder.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamResponse.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamResponseOrBuilder.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageProto.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamProto.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamStats.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamStatsOrBuilder.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ThrottleState.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ThrottleStateOrBuilder.java create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/arrow.proto create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/avro.proto create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/storage.proto create mode 100644 proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/stream.proto diff --git a/.gitignore b/.gitignore index 7466b121eb..fadd6afc2d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,15 @@ -.idea -*.iml +# Maven target/ + +# Eclipse +.classpath +.project +.settings + +# Intellij +*.iml +.idea/ + +# python utilities +*.pyc +__pycache__ diff --git a/google-cloud-bigquerystorage-bom/pom.xml b/google-cloud-bigquerystorage-bom/pom.xml index 342af32b60..846f15431d 100644 --- a/google-cloud-bigquerystorage-bom/pom.xml +++ b/google-cloud-bigquerystorage-bom/pom.xml @@ -60,22 +60,41 @@ - + + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1alpha2 + 0.85.2-SNAPSHOT + com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta1 0.85.2-SNAPSHOT - com.google.cloud - google-cloud-bigquerystorage - 0.120.2-beta-SNAPSHOT + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1beta2 + 0.85.2-SNAPSHOT + + + com.google.api.grpc + grpc-google-cloud-bigquerystorage-v1alpha2 + 
0.85.2-SNAPSHOT com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta1 0.85.2-SNAPSHOT + + com.google.api.grpc + grpc-google-cloud-bigquerystorage-v1beta2 + 0.85.2-SNAPSHOT + + + com.google.cloud + google-cloud-bigquerystorage + 0.120.2-beta-SNAPSHOT + diff --git a/google-cloud-bigquerystorage/pom.xml b/google-cloud-bigquerystorage/pom.xml index f1cfa04aaa..1053e01fa4 100644 --- a/google-cloud-bigquerystorage/pom.xml +++ b/google-cloud-bigquerystorage/pom.xml @@ -42,10 +42,18 @@ proto-google-common-protos + + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1alpha2 + com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta1 + + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1beta2 + com.google.guava guava @@ -114,11 +122,21 @@ 1.91.3 + + com.google.api.grpc + grpc-google-cloud-bigquerystorage-v1alpha2 + test + com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta1 test + + com.google.api.grpc + grpc-google-cloud-bigquerystorage-v1beta2 + test + com.google.api diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClient.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClient.java new file mode 100644 index 0000000000..7e62308993 --- /dev/null +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClient.java @@ -0,0 +1,389 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1alpha2; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream; +import com.google.cloud.bigquery.storage.v1alpha2.stub.BigQueryWriteStub; +import com.google.cloud.bigquery.storage.v1alpha2.stub.BigQueryWriteStubSettings; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND SERVICE +/** + * Service Description: BigQuery Write API. + * + *

The Write API can be used to write data to BigQuery. + * + *

This class provides the ability to make remote calls to the backing service through method + * calls that map to API methods. Sample code to get started: + * + *

+ * 
+ * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+ *   CreateWriteStreamRequest request = CreateWriteStreamRequest.newBuilder().build();
+ *   WriteStream response = bigQueryWriteClient.createWriteStream(request);
+ * }
+ * 
+ * 
+ * + *

Note: close() needs to be called on the bigQueryWriteClient object to clean up resources such + * as threads. In the example above, try-with-resources is used, which automatically calls close(). + * + *

The surface of this class includes several types of Java methods for each of the API's + * methods: + * + *

    + *
  1. A "flattened" method. With this type of method, the fields of the request type have been + * converted into function parameters. It may be the case that not all fields are available as + * parameters, and not every API method will have a flattened method entry point. + *
  2. A "request object" method. This type of method only takes one parameter, a request object, + * which must be constructed before the call. Not every API method will have a request object + * method. + *
  3. A "callable" method. This type of method takes no parameters and returns an immutable API + * callable object, which can be used to initiate calls to the service. + *
+ * + *

See the individual methods for example code. + * + *

Many parameters require resource names to be formatted in a particular way. To assist with + * these names, this class includes a format method for each type of name, and additionally a parse + * method to extract the individual identifiers contained within names that are returned. + * + *

This class can be customized by passing in a custom instance of BigQueryWriteSettings to + * create(). For example: + * + *

To customize credentials: + * + *

+ * 
+ * BigQueryWriteSettings bigQueryWriteSettings =
+ *     BigQueryWriteSettings.newBuilder()
+ *         .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
+ *         .build();
+ * BigQueryWriteClient bigQueryWriteClient =
+ *     BigQueryWriteClient.create(bigQueryWriteSettings);
+ * 
+ * 
+ * + * To customize the endpoint: + * + *
+ * 
+ * BigQueryWriteSettings bigQueryWriteSettings =
+ *     BigQueryWriteSettings.newBuilder().setEndpoint(myEndpoint).build();
+ * BigQueryWriteClient bigQueryWriteClient =
+ *     BigQueryWriteClient.create(bigQueryWriteSettings);
+ * 
+ * 
+ */ +@Generated("by gapic-generator") +@BetaApi +public class BigQueryWriteClient implements BackgroundResource { + private final BigQueryWriteSettings settings; + private final BigQueryWriteStub stub; + + /** Constructs an instance of BigQueryWriteClient with default settings. */ + public static final BigQueryWriteClient create() throws IOException { + return create(BigQueryWriteSettings.newBuilder().build()); + } + + /** + * Constructs an instance of BigQueryWriteClient, using the given settings. The channels are + * created based on the settings passed in, or defaults for any settings that are not set. + */ + public static final BigQueryWriteClient create(BigQueryWriteSettings settings) + throws IOException { + return new BigQueryWriteClient(settings); + } + + /** + * Constructs an instance of BigQueryWriteClient, using the given stub for making calls. This is + * for advanced usage - prefer to use BigQueryWriteSettings}. + */ + @BetaApi("A restructuring of stub classes is planned, so this may break in the future") + public static final BigQueryWriteClient create(BigQueryWriteStub stub) { + return new BigQueryWriteClient(stub); + } + + /** + * Constructs an instance of BigQueryWriteClient, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected BigQueryWriteClient(BigQueryWriteSettings settings) throws IOException { + this.settings = settings; + this.stub = ((BigQueryWriteStubSettings) settings.getStubSettings()).createStub(); + } + + @BetaApi("A restructuring of stub classes is planned, so this may break in the future") + protected BigQueryWriteClient(BigQueryWriteStub stub) { + this.settings = null; + this.stub = stub; + } + + public final BigQueryWriteSettings getSettings() { + return settings; + } + + @BetaApi("A restructuring of stub classes is planned, so this may break in the future") + public BigQueryWriteStub getStub() { + return stub; + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Creates a write stream to the given table. + * + *

Sample code: + * + *


+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   CreateWriteStreamRequest request = CreateWriteStreamRequest.newBuilder().build();
+   *   WriteStream response = bigQueryWriteClient.createWriteStream(request);
+   * }
+   * 
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final WriteStream createWriteStream(CreateWriteStreamRequest request) { + return createWriteStreamCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Creates a write stream to the given table. + * + *

Sample code: + * + *


+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   CreateWriteStreamRequest request = CreateWriteStreamRequest.newBuilder().build();
+   *   ApiFuture<WriteStream> future = bigQueryWriteClient.createWriteStreamCallable().futureCall(request);
+   *   // Do something
+   *   WriteStream response = future.get();
+   * }
+   * 
+ */ + public final UnaryCallable createWriteStreamCallable() { + return stub.createWriteStreamCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Appends data to the given stream. + * + *

 If `offset` is specified, the `offset` is checked against the end of the stream. The server + * returns `OUT_OF_RANGE` in `AppendRowsResponse` if an attempt is made to append to an offset + * beyond the current end of the stream, or `ALREADY_EXISTS` if the user provides an `offset` that has + * already been written to. The user can retry with an adjusted offset within the same RPC stream. If + * `offset` is not specified, the append happens at the end of the stream. + * + *

 The response contains the offset at which the append happened. Responses are received in the + * same order in which requests are sent. There will be one response for each successful request. + * If the `offset` is not set in the response, it means the append did not happen due to an error. If + * one request fails, all subsequent requests will also fail until a successful request is made + * again. + * + *

If the stream is of `PENDING` type, data will only be available for read operations after + * the stream is committed. + * + *

Sample code: + * + *


+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   BidiStream<AppendRowsRequest, AppendRowsResponse> bidiStream =
+   *       bigQueryWriteClient.appendRowsCallable().call();
+   *
+   *   AppendRowsRequest request = AppendRowsRequest.newBuilder().build();
+   *   bidiStream.send(request);
+   *   for (AppendRowsResponse response : bidiStream) {
+   *     // Do something when a response is received
+   *   }
+   * }
+   * 
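+   *
+   * Editor's note: the following fuller sketch of the PENDING-stream flow described above
+   * (create a stream, append at an offset, finalize, then commit) is an illustration added
+   * for this write-up, not generated code. The table path and offset value are hypothetical,
+   * the row payload is omitted, and the field setters assume the v1alpha2 protos added in
+   * this patch.
+   *
+   * try (BigQueryWriteClient client = BigQueryWriteClient.create()) {
+   *   String table = "projects/my-project/datasets/my_dataset/tables/my_table"; // hypothetical
+   *   // Rows written to a PENDING stream stay invisible to readers until the stream is committed.
+   *   WriteStream stream = client.createWriteStream(
+   *       CreateWriteStreamRequest.newBuilder()
+   *           .setParent(table)
+   *           .setWriteStream(WriteStream.newBuilder().setType(WriteStream.Type.PENDING).build())
+   *           .build());
+   *
+   *   BidiStream<AppendRowsRequest, AppendRowsResponse> bidiStream =
+   *       client.appendRowsCallable().call();
+   *   bidiStream.send(
+   *       AppendRowsRequest.newBuilder()
+   *           .setWriteStream(stream.getName())
+   *           .setOffset(Int64Value.of(0)) // optional; checked against the current end of the stream
+   *           // row payload (proto_rows) omitted
+   *           .build());
+   *   for (AppendRowsResponse response : bidiStream) {
+   *     // response.getOffset() is the offset at which the append happened
+   *   }
+   *
+   *   // No new data can be appended after finalize.
+   *   client.finalizeWriteStream(
+   *       FinalizeWriteStreamRequest.newBuilder().setName(stream.getName()).build());
+   *   // Committing the finalized PENDING stream makes its data readable, atomically.
+   *   client.batchCommitWriteStreams(
+   *       BatchCommitWriteStreamsRequest.newBuilder()
+   *           .setParent(table)
+   *           .addWriteStreams(stream.getName())
+   *           .build());
+   * }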
+ */ + public final BidiStreamingCallable appendRowsCallable() { + return stub.appendRowsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Gets a write stream. + * + *

Sample code: + * + *


+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   GetWriteStreamRequest request = GetWriteStreamRequest.newBuilder().build();
+   *   WriteStream response = bigQueryWriteClient.getWriteStream(request);
+   * }
+   * 
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final WriteStream getWriteStream(GetWriteStreamRequest request) { + return getWriteStreamCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Gets a write stream. + * + *

Sample code: + * + *


+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   GetWriteStreamRequest request = GetWriteStreamRequest.newBuilder().build();
+   *   ApiFuture<WriteStream> future = bigQueryWriteClient.getWriteStreamCallable().futureCall(request);
+   *   // Do something
+   *   WriteStream response = future.get();
+   * }
+   * 
+ */ + public final UnaryCallable getWriteStreamCallable() { + return stub.getWriteStreamCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Finalize a write stream so that no new data can be appended to the stream. + * + *

Sample code: + * + *


+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   FinalizeWriteStreamRequest request = FinalizeWriteStreamRequest.newBuilder().build();
+   *   FinalizeWriteStreamResponse response = bigQueryWriteClient.finalizeWriteStream(request);
+   * }
+   * 
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final FinalizeWriteStreamResponse finalizeWriteStream(FinalizeWriteStreamRequest request) { + return finalizeWriteStreamCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Finalize a write stream so that no new data can be appended to the stream. + * + *

Sample code: + * + *


+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   FinalizeWriteStreamRequest request = FinalizeWriteStreamRequest.newBuilder().build();
+   *   ApiFuture<FinalizeWriteStreamResponse> future = bigQueryWriteClient.finalizeWriteStreamCallable().futureCall(request);
+   *   // Do something
+   *   FinalizeWriteStreamResponse response = future.get();
+   * }
+   * 
+ */ + public final UnaryCallable + finalizeWriteStreamCallable() { + return stub.finalizeWriteStreamCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Atomically commits a group of `PENDING` streams that belong to the same `parent` table. Streams + * must be finalized before commit and cannot be committed multiple times. Once a stream is + * committed, data in the stream becomes available for read operations. + * + *

Sample code: + * + *


+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   BatchCommitWriteStreamsRequest request = BatchCommitWriteStreamsRequest.newBuilder().build();
+   *   BatchCommitWriteStreamsResponse response = bigQueryWriteClient.batchCommitWriteStreams(request);
+   * }
+   * 
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final BatchCommitWriteStreamsResponse batchCommitWriteStreams( + BatchCommitWriteStreamsRequest request) { + return batchCommitWriteStreamsCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Atomically commits a group of `PENDING` streams that belong to the same `parent` table. Streams + * must be finalized before commit and cannot be committed multiple times. Once a stream is + * committed, data in the stream becomes available for read operations. + * + *

Sample code: + * + *


+   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+   *   BatchCommitWriteStreamsRequest request = BatchCommitWriteStreamsRequest.newBuilder().build();
+   *   ApiFuture<BatchCommitWriteStreamsResponse> future = bigQueryWriteClient.batchCommitWriteStreamsCallable().futureCall(request);
+   *   // Do something
+   *   BatchCommitWriteStreamsResponse response = future.get();
+   * }
+   * 
+ */ + public final UnaryCallable + batchCommitWriteStreamsCallable() { + return stub.batchCommitWriteStreamsCallable(); + } + + @Override + public final void close() { + stub.close(); + } + + @Override + public void shutdown() { + stub.shutdown(); + } + + @Override + public boolean isShutdown() { + return stub.isShutdown(); + } + + @Override + public boolean isTerminated() { + return stub.isTerminated(); + } + + @Override + public void shutdownNow() { + stub.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return stub.awaitTermination(duration, unit); + } +} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteSettings.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteSettings.java new file mode 100644 index 0000000000..4fbfc58128 --- /dev/null +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteSettings.java @@ -0,0 +1,233 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1alpha2; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientSettings; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream; +import com.google.cloud.bigquery.storage.v1alpha2.stub.BigQueryWriteStubSettings; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +/** + * Settings class to configure an instance of {@link BigQueryWriteClient}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
  • The default service address (bigquerystorage.googleapis.com) and default port (443) are + * used. + *
  • Credentials are acquired automatically through Application Default Credentials. + *
  • Retries are configured for idempotent methods but not for non-idempotent methods. + *
+ * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. + * + *

For example, to set the total timeout of createWriteStream to 30 seconds: + * + *

+ * 
+ * BigQueryWriteSettings.Builder bigQueryWriteSettingsBuilder =
+ *     BigQueryWriteSettings.newBuilder();
+ * bigQueryWriteSettingsBuilder.createWriteStreamSettings().getRetrySettings().toBuilder()
+ *     .setTotalTimeout(Duration.ofSeconds(30));
+ * BigQueryWriteSettings bigQueryWriteSettings = bigQueryWriteSettingsBuilder.build();
+ * 
+ * 
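+ *
+ * Editor's note: as written, the generated snippet above obtains a modified
+ * RetrySettings.Builder but never applies it back to the call settings. A sketch of a
+ * variant that actually installs the new timeout, assuming the standard gax
+ * setRetrySettings builder method, is:
+ *
+ * BigQueryWriteSettings.Builder bigQueryWriteSettingsBuilder = BigQueryWriteSettings.newBuilder();
+ * bigQueryWriteSettingsBuilder
+ *     .createWriteStreamSettings()
+ *     .setRetrySettings(
+ *         bigQueryWriteSettingsBuilder
+ *             .createWriteStreamSettings()
+ *             .getRetrySettings()
+ *             .toBuilder()
+ *             .setTotalTimeout(Duration.ofSeconds(30))
+ *             .build());
+ * BigQueryWriteSettings bigQueryWriteSettings = bigQueryWriteSettingsBuilder.build();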
+ */ +@Generated("by gapic-generator") +@BetaApi +public class BigQueryWriteSettings extends ClientSettings { + /** Returns the object with the settings used for calls to createWriteStream. */ + public UnaryCallSettings createWriteStreamSettings() { + return ((BigQueryWriteStubSettings) getStubSettings()).createWriteStreamSettings(); + } + + /** Returns the object with the settings used for calls to appendRows. */ + public StreamingCallSettings appendRowsSettings() { + return ((BigQueryWriteStubSettings) getStubSettings()).appendRowsSettings(); + } + + /** Returns the object with the settings used for calls to getWriteStream. */ + public UnaryCallSettings getWriteStreamSettings() { + return ((BigQueryWriteStubSettings) getStubSettings()).getWriteStreamSettings(); + } + + /** Returns the object with the settings used for calls to finalizeWriteStream. */ + public UnaryCallSettings + finalizeWriteStreamSettings() { + return ((BigQueryWriteStubSettings) getStubSettings()).finalizeWriteStreamSettings(); + } + + /** Returns the object with the settings used for calls to batchCommitWriteStreams. */ + public UnaryCallSettings + batchCommitWriteStreamsSettings() { + return ((BigQueryWriteStubSettings) getStubSettings()).batchCommitWriteStreamsSettings(); + } + + public static final BigQueryWriteSettings create(BigQueryWriteStubSettings stub) + throws IOException { + return new BigQueryWriteSettings.Builder(stub.toBuilder()).build(); + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return BigQueryWriteStubSettings.defaultExecutorProviderBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return BigQueryWriteStubSettings.getDefaultEndpoint(); + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return BigQueryWriteStubSettings.getDefaultServiceScopes(); + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return BigQueryWriteStubSettings.defaultCredentialsProviderBuilder(); + } + + /** Returns a builder for the default ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return BigQueryWriteStubSettings.defaultGrpcTransportProviderBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return BigQueryWriteStubSettings.defaultTransportChannelProvider(); + } + + @BetaApi("The surface for customizing headers is not stable yet and may change in the future.") + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return BigQueryWriteStubSettings.defaultApiClientHeaderProviderBuilder(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected BigQueryWriteSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + } + + /** Builder for BigQueryWriteSettings. 
*/ + public static class Builder extends ClientSettings.Builder { + protected Builder() throws IOException { + this((ClientContext) null); + } + + protected Builder(ClientContext clientContext) { + super(BigQueryWriteStubSettings.newBuilder(clientContext)); + } + + private static Builder createDefault() { + return new Builder(BigQueryWriteStubSettings.newBuilder()); + } + + protected Builder(BigQueryWriteSettings settings) { + super(settings.getStubSettings().toBuilder()); + } + + protected Builder(BigQueryWriteStubSettings.Builder stubSettings) { + super(stubSettings); + } + + public BigQueryWriteStubSettings.Builder getStubSettingsBuilder() { + return ((BigQueryWriteStubSettings.Builder) getStubSettings()); + } + + // NEXT_MAJOR_VER: remove 'throws Exception' + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) throws Exception { + super.applyToAllUnaryMethods( + getStubSettingsBuilder().unaryMethodSettingsBuilders(), settingsUpdater); + return this; + } + + /** Returns the builder for the settings used for calls to createWriteStream. */ + public UnaryCallSettings.Builder + createWriteStreamSettings() { + return getStubSettingsBuilder().createWriteStreamSettings(); + } + + /** Returns the builder for the settings used for calls to appendRows. */ + public StreamingCallSettings.Builder + appendRowsSettings() { + return getStubSettingsBuilder().appendRowsSettings(); + } + + /** Returns the builder for the settings used for calls to getWriteStream. */ + public UnaryCallSettings.Builder getWriteStreamSettings() { + return getStubSettingsBuilder().getWriteStreamSettings(); + } + + /** Returns the builder for the settings used for calls to finalizeWriteStream. */ + public UnaryCallSettings.Builder + finalizeWriteStreamSettings() { + return getStubSettingsBuilder().finalizeWriteStreamSettings(); + } + + /** Returns the builder for the settings used for calls to batchCommitWriteStreams. */ + public UnaryCallSettings.Builder< + BatchCommitWriteStreamsRequest, BatchCommitWriteStreamsResponse> + batchCommitWriteStreamsSettings() { + return getStubSettingsBuilder().batchCommitWriteStreamsSettings(); + } + + @Override + public BigQueryWriteSettings build() throws IOException { + return new BigQueryWriteSettings(this); + } + } +} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/package-info.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/package-info.java new file mode 100644 index 0000000000..9d16b03f8a --- /dev/null +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/package-info.java @@ -0,0 +1,42 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * A client to BigQuery Storage API. + * + *

The interfaces provided are listed below, along with usage samples. + * + *

=================== BigQueryWriteClient =================== + * + *

Service Description: BigQuery Write API. + * + *

The Write API can be used to write data to BigQuery. + * + *

Sample for BigQueryWriteClient: + * + *

+ * 
+ * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
+ *   CreateWriteStreamRequest request = CreateWriteStreamRequest.newBuilder().build();
+ *   WriteStream response = bigQueryWriteClient.createWriteStream(request);
+ * }
+ * 
+ * 
+ */ +@Generated("by gapic-generator") +package com.google.cloud.bigquery.storage.v1alpha2; + +import javax.annotation.Generated; diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/BigQueryWriteStub.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/BigQueryWriteStub.java new file mode 100644 index 0000000000..1cbfb0e8f7 --- /dev/null +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/BigQueryWriteStub.java @@ -0,0 +1,67 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1alpha2.stub; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +/** + * Base stub class for BigQuery Storage API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator") +@BetaApi("A restructuring of stub classes is planned, so this may break in the future") +public abstract class BigQueryWriteStub implements BackgroundResource { + + public UnaryCallable createWriteStreamCallable() { + throw new UnsupportedOperationException("Not implemented: createWriteStreamCallable()"); + } + + public BidiStreamingCallable appendRowsCallable() { + throw new UnsupportedOperationException("Not implemented: appendRowsCallable()"); + } + + public UnaryCallable getWriteStreamCallable() { + throw new UnsupportedOperationException("Not implemented: getWriteStreamCallable()"); + } + + public UnaryCallable + finalizeWriteStreamCallable() { + throw new UnsupportedOperationException("Not implemented: finalizeWriteStreamCallable()"); + } + + public UnaryCallable + batchCommitWriteStreamsCallable() { + throw new UnsupportedOperationException("Not implemented: batchCommitWriteStreamsCallable()"); + } + + @Override + public abstract void close(); +} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/BigQueryWriteStubSettings.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/BigQueryWriteStubSettings.java new file mode 100644 index 0000000000..a3dcd59b4a --- /dev/null +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/BigQueryWriteStubSettings.java @@ -0,0 +1,381 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigquery.storage.v1alpha2.stub; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.StubSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; +import org.threeten.bp.Duration; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +/** + * Settings class to configure an instance of {@link BigQueryWriteStub}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
  • The default service address (bigquerystorage.googleapis.com) and default port (443) are + * used. + *
  • Credentials are acquired automatically through Application Default Credentials. + *
  • Retries are configured for idempotent methods but not for non-idempotent methods. + *
+ * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. + * + *

For example, to set the total timeout of createWriteStream to 30 seconds: + * + *

+ * 
+ * BigQueryWriteStubSettings.Builder bigQueryWriteSettingsBuilder =
+ *     BigQueryWriteStubSettings.newBuilder();
+ * bigQueryWriteSettingsBuilder.createWriteStreamSettings().getRetrySettings().toBuilder()
+ *     .setTotalTimeout(Duration.ofSeconds(30));
+ * BigQueryWriteStubSettings bigQueryWriteSettings = bigQueryWriteSettingsBuilder.build();
+ * 
+ * 
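+ *
+ * Editor's note: a customized BigQueryWriteStubSettings is typically wrapped back into
+ * client settings before use. A minimal sketch, using the BigQueryWriteSettings.create
+ * factory defined in this patch (myEndpoint is a placeholder):
+ *
+ * BigQueryWriteStubSettings stubSettings =
+ *     BigQueryWriteStubSettings.newBuilder()
+ *         .setEndpoint(myEndpoint)
+ *         .build();
+ * BigQueryWriteClient client =
+ *     BigQueryWriteClient.create(BigQueryWriteSettings.create(stubSettings));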
+ */ +@Generated("by gapic-generator") +@BetaApi +public class BigQueryWriteStubSettings extends StubSettings { + /** The default scopes of the service. */ + private static final ImmutableList DEFAULT_SERVICE_SCOPES = + ImmutableList.builder() + .add("https://www.googleapis.com/auth/bigquery") + .add("https://www.googleapis.com/auth/bigquery.insertdata") + .add("https://www.googleapis.com/auth/cloud-platform") + .build(); + + private final UnaryCallSettings createWriteStreamSettings; + private final StreamingCallSettings appendRowsSettings; + private final UnaryCallSettings getWriteStreamSettings; + private final UnaryCallSettings + finalizeWriteStreamSettings; + private final UnaryCallSettings + batchCommitWriteStreamsSettings; + + /** Returns the object with the settings used for calls to createWriteStream. */ + public UnaryCallSettings createWriteStreamSettings() { + return createWriteStreamSettings; + } + + /** Returns the object with the settings used for calls to appendRows. */ + public StreamingCallSettings appendRowsSettings() { + return appendRowsSettings; + } + + /** Returns the object with the settings used for calls to getWriteStream. */ + public UnaryCallSettings getWriteStreamSettings() { + return getWriteStreamSettings; + } + + /** Returns the object with the settings used for calls to finalizeWriteStream. */ + public UnaryCallSettings + finalizeWriteStreamSettings() { + return finalizeWriteStreamSettings; + } + + /** Returns the object with the settings used for calls to batchCommitWriteStreams. */ + public UnaryCallSettings + batchCommitWriteStreamsSettings() { + return batchCommitWriteStreamsSettings; + } + + @BetaApi("A restructuring of stub classes is planned, so this may break in the future") + public BigQueryWriteStub createStub() throws IOException { + if (getTransportChannelProvider() + .getTransportName() + .equals(GrpcTransportChannel.getGrpcTransportName())) { + return GrpcBigQueryWriteStub.create(this); + } else { + throw new UnsupportedOperationException( + "Transport not supported: " + getTransportChannelProvider().getTransportName()); + } + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return InstantiatingExecutorProvider.newBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return "bigquerystorage.googleapis.com:443"; + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return DEFAULT_SERVICE_SCOPES; + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return GoogleCredentialsProvider.newBuilder().setScopesToApply(DEFAULT_SERVICE_SCOPES); + } + + /** Returns a builder for the default ChannelProvider for this service. 
*/ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return InstantiatingGrpcChannelProvider.newBuilder() + .setMaxInboundMessageSize(Integer.MAX_VALUE); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return defaultGrpcTransportProviderBuilder().build(); + } + + @BetaApi("The surface for customizing headers is not stable yet and may change in the future.") + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return ApiClientHeaderProvider.newBuilder() + .setGeneratedLibToken( + "gapic", GaxProperties.getLibraryVersion(BigQueryWriteStubSettings.class)) + .setTransportToken( + GaxGrpcProperties.getGrpcTokenName(), GaxGrpcProperties.getGrpcVersion()); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected BigQueryWriteStubSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + + createWriteStreamSettings = settingsBuilder.createWriteStreamSettings().build(); + appendRowsSettings = settingsBuilder.appendRowsSettings().build(); + getWriteStreamSettings = settingsBuilder.getWriteStreamSettings().build(); + finalizeWriteStreamSettings = settingsBuilder.finalizeWriteStreamSettings().build(); + batchCommitWriteStreamsSettings = settingsBuilder.batchCommitWriteStreamsSettings().build(); + } + + /** Builder for BigQueryWriteStubSettings. 
*/ + public static class Builder extends StubSettings.Builder { + private final ImmutableList> unaryMethodSettingsBuilders; + + private final UnaryCallSettings.Builder + createWriteStreamSettings; + private final StreamingCallSettings.Builder + appendRowsSettings; + private final UnaryCallSettings.Builder + getWriteStreamSettings; + private final UnaryCallSettings.Builder + finalizeWriteStreamSettings; + private final UnaryCallSettings.Builder< + BatchCommitWriteStreamsRequest, BatchCommitWriteStreamsResponse> + batchCommitWriteStreamsSettings; + + private static final ImmutableMap> + RETRYABLE_CODE_DEFINITIONS; + + static { + ImmutableMap.Builder> definitions = + ImmutableMap.builder(); + definitions.put( + "idempotent", + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.DEADLINE_EXCEEDED, StatusCode.Code.UNAVAILABLE))); + definitions.put("non_idempotent", ImmutableSet.copyOf(Lists.newArrayList())); + RETRYABLE_CODE_DEFINITIONS = definitions.build(); + } + + private static final ImmutableMap RETRY_PARAM_DEFINITIONS; + + static { + ImmutableMap.Builder definitions = ImmutableMap.builder(); + RetrySettings settings = null; + settings = + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(100L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelay(Duration.ofMillis(60000L)) + .setInitialRpcTimeout(Duration.ofMillis(20000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeout(Duration.ofMillis(20000L)) + .setTotalTimeout(Duration.ofMillis(600000L)) + .build(); + definitions.put("default", settings); + RETRY_PARAM_DEFINITIONS = definitions.build(); + } + + protected Builder() { + this((ClientContext) null); + } + + protected Builder(ClientContext clientContext) { + super(clientContext); + + createWriteStreamSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + appendRowsSettings = StreamingCallSettings.newBuilder(); + + getWriteStreamSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + finalizeWriteStreamSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + batchCommitWriteStreamsSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createWriteStreamSettings, + getWriteStreamSettings, + finalizeWriteStreamSettings, + batchCommitWriteStreamsSettings); + + initDefaults(this); + } + + private static Builder createDefault() { + Builder builder = new Builder((ClientContext) null); + builder.setTransportChannelProvider(defaultTransportChannelProvider()); + builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); + builder.setInternalHeaderProvider(defaultApiClientHeaderProviderBuilder().build()); + builder.setEndpoint(getDefaultEndpoint()); + return initDefaults(builder); + } + + private static Builder initDefaults(Builder builder) { + + builder + .createWriteStreamSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("non_idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")); + + builder + .getWriteStreamSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("non_idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")); + + builder + .finalizeWriteStreamSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("non_idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")); + + builder + .batchCommitWriteStreamsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")); + + return builder; + } + + protected 
Builder(BigQueryWriteStubSettings settings) { + super(settings); + + createWriteStreamSettings = settings.createWriteStreamSettings.toBuilder(); + appendRowsSettings = settings.appendRowsSettings.toBuilder(); + getWriteStreamSettings = settings.getWriteStreamSettings.toBuilder(); + finalizeWriteStreamSettings = settings.finalizeWriteStreamSettings.toBuilder(); + batchCommitWriteStreamsSettings = settings.batchCommitWriteStreamsSettings.toBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createWriteStreamSettings, + getWriteStreamSettings, + finalizeWriteStreamSettings, + batchCommitWriteStreamsSettings); + } + + // NEXT_MAJOR_VER: remove 'throws Exception' + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) throws Exception { + super.applyToAllUnaryMethods(unaryMethodSettingsBuilders, settingsUpdater); + return this; + } + + public ImmutableList> unaryMethodSettingsBuilders() { + return unaryMethodSettingsBuilders; + } + + /** Returns the builder for the settings used for calls to createWriteStream. */ + public UnaryCallSettings.Builder + createWriteStreamSettings() { + return createWriteStreamSettings; + } + + /** Returns the builder for the settings used for calls to appendRows. */ + public StreamingCallSettings.Builder + appendRowsSettings() { + return appendRowsSettings; + } + + /** Returns the builder for the settings used for calls to getWriteStream. */ + public UnaryCallSettings.Builder getWriteStreamSettings() { + return getWriteStreamSettings; + } + + /** Returns the builder for the settings used for calls to finalizeWriteStream. */ + public UnaryCallSettings.Builder + finalizeWriteStreamSettings() { + return finalizeWriteStreamSettings; + } + + /** Returns the builder for the settings used for calls to batchCommitWriteStreams. */ + public UnaryCallSettings.Builder< + BatchCommitWriteStreamsRequest, BatchCommitWriteStreamsResponse> + batchCommitWriteStreamsSettings() { + return batchCommitWriteStreamsSettings; + } + + @Override + public BigQueryWriteStubSettings build() throws IOException { + return new BigQueryWriteStubSettings(this); + } + } +} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/GrpcBigQueryWriteCallableFactory.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/GrpcBigQueryWriteCallableFactory.java new file mode 100644 index 0000000000..52c747757c --- /dev/null +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/GrpcBigQueryWriteCallableFactory.java @@ -0,0 +1,115 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigquery.storage.v1alpha2.stub; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcCallableFactory; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BatchingCallSettings; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.longrunning.stub.OperationsStub; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +/** + * gRPC callable factory implementation for BigQuery Storage API. + * + *

This class is for advanced usage. + */ +@Generated("by gapic-generator") +@BetaApi("The surface for use by generated code is not stable yet and may change in the future.") +public class GrpcBigQueryWriteCallableFactory implements GrpcStubCallableFactory { + @Override + public UnaryCallable createUnaryCallable( + GrpcCallSettings grpcCallSettings, + UnaryCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createUnaryCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public + UnaryCallable createPagedCallable( + GrpcCallSettings grpcCallSettings, + PagedCallSettings pagedCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createPagedCallable( + grpcCallSettings, pagedCallSettings, clientContext); + } + + @Override + public UnaryCallable createBatchingCallable( + GrpcCallSettings grpcCallSettings, + BatchingCallSettings batchingCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBatchingCallable( + grpcCallSettings, batchingCallSettings, clientContext); + } + + @BetaApi( + "The surface for long-running operations is not stable yet and may change in the future.") + @Override + public + OperationCallable createOperationCallable( + GrpcCallSettings grpcCallSettings, + OperationCallSettings operationCallSettings, + ClientContext clientContext, + OperationsStub operationsStub) { + return GrpcCallableFactory.createOperationCallable( + grpcCallSettings, operationCallSettings, clientContext, operationsStub); + } + + @Override + public + BidiStreamingCallable createBidiStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings streamingCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBidiStreamingCallable( + grpcCallSettings, streamingCallSettings, clientContext); + } + + @Override + public + ServerStreamingCallable createServerStreamingCallable( + GrpcCallSettings grpcCallSettings, + ServerStreamingCallSettings streamingCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createServerStreamingCallable( + grpcCallSettings, streamingCallSettings, clientContext); + } + + @Override + public + ClientStreamingCallable createClientStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings streamingCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createClientStreamingCallable( + grpcCallSettings, streamingCallSettings, clientContext); + } +} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/GrpcBigQueryWriteStub.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/GrpcBigQueryWriteStub.java new file mode 100644 index 0000000000..2addffce8a --- /dev/null +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/GrpcBigQueryWriteStub.java @@ -0,0 +1,292 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1alpha2.stub; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.core.BackgroundResourceAggregation; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.RequestParamsExtractor; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream; +import com.google.common.collect.ImmutableMap; +import io.grpc.MethodDescriptor; +import io.grpc.protobuf.ProtoUtils; +import java.io.IOException; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +/** + * gRPC stub implementation for BigQuery Storage API. + * + *
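For advanced callers that want to bypass the client wrapper, the stub can be built directly from its settings. A minimal sketch, assuming default credentials and endpoint resolve, using a hypothetical table path for `parent` and eliding the WriteStream payload itself:

    import com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest;
    import com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream;
    import com.google.cloud.bigquery.storage.v1alpha2.stub.BigQueryWriteStubSettings;
    import com.google.cloud.bigquery.storage.v1alpha2.stub.GrpcBigQueryWriteStub;

    public class CreateWriteStreamSketch {
      public static void main(String[] args) throws Exception {
        BigQueryWriteStubSettings settings = BigQueryWriteStubSettings.newBuilder().build();
        // The stub is a BackgroundResource, so try-with-resources shuts it down.
        try (GrpcBigQueryWriteStub stub = GrpcBigQueryWriteStub.create(settings)) {
          CreateWriteStreamRequest request =
              CreateWriteStreamRequest.newBuilder()
                  // Hypothetical table path; the exact format is defined by the v1alpha2 protos.
                  .setParent("projects/my-project/datasets/my_dataset/tables/my_table")
                  .build();
          WriteStream stream = stub.createWriteStreamCallable().call(request);
          System.out.println("created: " + stream);
        }
      }
    }

Most callers should prefer the BigQueryWriteClient wrapper added in this PR; the stub surface is marked unstable.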

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator") +@BetaApi("A restructuring of stub classes is planned, so this may break in the future") +public class GrpcBigQueryWriteStub extends BigQueryWriteStub { + + private static final MethodDescriptor + createWriteStreamMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.bigquery.storage.v1alpha2.BigQueryWrite/CreateWriteStream") + .setRequestMarshaller( + ProtoUtils.marshaller(CreateWriteStreamRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(WriteStream.getDefaultInstance())) + .build(); + private static final MethodDescriptor + appendRowsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.BIDI_STREAMING) + .setFullMethodName("google.cloud.bigquery.storage.v1alpha2.BigQueryWrite/AppendRows") + .setRequestMarshaller(ProtoUtils.marshaller(AppendRowsRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(AppendRowsResponse.getDefaultInstance())) + .build(); + private static final MethodDescriptor + getWriteStreamMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.bigquery.storage.v1alpha2.BigQueryWrite/GetWriteStream") + .setRequestMarshaller( + ProtoUtils.marshaller(GetWriteStreamRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(WriteStream.getDefaultInstance())) + .build(); + private static final MethodDescriptor + finalizeWriteStreamMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.bigquery.storage.v1alpha2.BigQueryWrite/FinalizeWriteStream") + .setRequestMarshaller( + ProtoUtils.marshaller(FinalizeWriteStreamRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(FinalizeWriteStreamResponse.getDefaultInstance())) + .build(); + private static final MethodDescriptor< + BatchCommitWriteStreamsRequest, BatchCommitWriteStreamsResponse> + batchCommitWriteStreamsMethodDescriptor = + MethodDescriptor + .newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.bigquery.storage.v1alpha2.BigQueryWrite/BatchCommitWriteStreams") + .setRequestMarshaller( + ProtoUtils.marshaller(BatchCommitWriteStreamsRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(BatchCommitWriteStreamsResponse.getDefaultInstance())) + .build(); + + private final BackgroundResource backgroundResources; + + private final UnaryCallable createWriteStreamCallable; + private final BidiStreamingCallable appendRowsCallable; + private final UnaryCallable getWriteStreamCallable; + private final UnaryCallable + finalizeWriteStreamCallable; + private final UnaryCallable + batchCommitWriteStreamsCallable; + + private final GrpcStubCallableFactory callableFactory; + + public static final GrpcBigQueryWriteStub create(BigQueryWriteStubSettings settings) + throws IOException { + return new GrpcBigQueryWriteStub(settings, ClientContext.create(settings)); + } + + public static final GrpcBigQueryWriteStub create(ClientContext clientContext) throws IOException { + return new GrpcBigQueryWriteStub(BigQueryWriteStubSettings.newBuilder().build(), clientContext); + } + + public static final GrpcBigQueryWriteStub create( + ClientContext clientContext, GrpcStubCallableFactory callableFactory) 
throws IOException { + return new GrpcBigQueryWriteStub( + BigQueryWriteStubSettings.newBuilder().build(), clientContext, callableFactory); + } + + /** + * Constructs an instance of GrpcBigQueryWriteStub, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected GrpcBigQueryWriteStub(BigQueryWriteStubSettings settings, ClientContext clientContext) + throws IOException { + this(settings, clientContext, new GrpcBigQueryWriteCallableFactory()); + } + + /** + * Constructs an instance of GrpcBigQueryWriteStub, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected GrpcBigQueryWriteStub( + BigQueryWriteStubSettings settings, + ClientContext clientContext, + GrpcStubCallableFactory callableFactory) + throws IOException { + this.callableFactory = callableFactory; + + GrpcCallSettings createWriteStreamTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(createWriteStreamMethodDescriptor) + .setParamsExtractor( + new RequestParamsExtractor() { + @Override + public Map extract(CreateWriteStreamRequest request) { + ImmutableMap.Builder params = ImmutableMap.builder(); + params.put("parent", String.valueOf(request.getParent())); + return params.build(); + } + }) + .build(); + GrpcCallSettings appendRowsTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(appendRowsMethodDescriptor) + .build(); + GrpcCallSettings getWriteStreamTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(getWriteStreamMethodDescriptor) + .setParamsExtractor( + new RequestParamsExtractor() { + @Override + public Map extract(GetWriteStreamRequest request) { + ImmutableMap.Builder params = ImmutableMap.builder(); + params.put("name", String.valueOf(request.getName())); + return params.build(); + } + }) + .build(); + GrpcCallSettings + finalizeWriteStreamTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(finalizeWriteStreamMethodDescriptor) + .setParamsExtractor( + new RequestParamsExtractor() { + @Override + public Map extract(FinalizeWriteStreamRequest request) { + ImmutableMap.Builder params = ImmutableMap.builder(); + params.put("name", String.valueOf(request.getName())); + return params.build(); + } + }) + .build(); + GrpcCallSettings + batchCommitWriteStreamsTransportSettings = + GrpcCallSettings + .newBuilder() + .setMethodDescriptor(batchCommitWriteStreamsMethodDescriptor) + .setParamsExtractor( + new RequestParamsExtractor() { + @Override + public Map extract(BatchCommitWriteStreamsRequest request) { + ImmutableMap.Builder params = ImmutableMap.builder(); + params.put("parent", String.valueOf(request.getParent())); + return params.build(); + } + }) + .build(); + + this.createWriteStreamCallable = + callableFactory.createUnaryCallable( + createWriteStreamTransportSettings, + settings.createWriteStreamSettings(), + clientContext); + this.appendRowsCallable = + callableFactory.createBidiStreamingCallable( + appendRowsTransportSettings, settings.appendRowsSettings(), clientContext); + this.getWriteStreamCallable = + callableFactory.createUnaryCallable( + getWriteStreamTransportSettings, settings.getWriteStreamSettings(), clientContext); + this.finalizeWriteStreamCallable = + callableFactory.createUnaryCallable( + finalizeWriteStreamTransportSettings, + settings.finalizeWriteStreamSettings(), + clientContext); + 
this.batchCommitWriteStreamsCallable = + callableFactory.createUnaryCallable( + batchCommitWriteStreamsTransportSettings, + settings.batchCommitWriteStreamsSettings(), + clientContext); + + backgroundResources = new BackgroundResourceAggregation(clientContext.getBackgroundResources()); + } + + public UnaryCallable createWriteStreamCallable() { + return createWriteStreamCallable; + } + + public BidiStreamingCallable appendRowsCallable() { + return appendRowsCallable; + } + + public UnaryCallable getWriteStreamCallable() { + return getWriteStreamCallable; + } + + public UnaryCallable + finalizeWriteStreamCallable() { + return finalizeWriteStreamCallable; + } + + public UnaryCallable + batchCommitWriteStreamsCallable() { + return batchCommitWriteStreamsCallable; + } + + @Override + public final void close() { + shutdown(); + } + + @Override + public void shutdown() { + backgroundResources.shutdown(); + } + + @Override + public boolean isShutdown() { + return backgroundResources.isShutdown(); + } + + @Override + public boolean isTerminated() { + return backgroundResources.isTerminated(); + } + + @Override + public void shutdownNow() { + backgroundResources.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return backgroundResources.awaitTermination(duration, unit); + } +} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadClient.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadClient.java new file mode 100644 index 0000000000..f059bab1d1 --- /dev/null +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadClient.java @@ -0,0 +1,388 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1beta2; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1beta2.stub.BigQueryReadStub; +import com.google.cloud.bigquery.storage.v1beta2.stub.BigQueryReadStubSettings; +import java.io.IOException; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND SERVICE +/** + * Service Description: BigQuery Read API. + * + *

The Read API can be used to read data from BigQuery. + * + *

This class provides the ability to make remote calls to the backing service through method + * calls that map to API methods. Sample code to get started: + * + *

+ * 
+ * try (BaseBigQueryReadClient baseBigQueryReadClient = BaseBigQueryReadClient.create()) {
+ *   String parent = "";
+ *   ReadSession readSession = ReadSession.newBuilder().build();
+ *   int maxStreamCount = 0;
+ *   ReadSession response = baseBigQueryReadClient.createReadSession(parent, readSession, maxStreamCount);
+ * }
+ * 
+ * 
+ * + *

Note: close() needs to be called on the baseBigQueryReadClient object to clean up resources + * such as threads. In the example above, try-with-resources is used, which automatically calls + * close(). + * + *

The surface of this class includes several types of Java methods for each of the API's + * methods: + * + *

    + *
  1. A "flattened" method. With this type of method, the fields of the request type have been + * converted into function parameters. It may be the case that not all fields are available as + * parameters, and not every API method will have a flattened method entry point. + *
  2. A "request object" method. This type of method only takes one parameter, a request object, + * which must be constructed before the call. Not every API method will have a request object + * method. + *
  3. A "callable" method. This type of method takes no parameters and returns an immutable API + * callable object, which can be used to initiate calls to the service. + *
+ * + *

See the individual methods for example code. + * + *
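As a quick illustration of the three shapes listed above, the same createReadSession RPC can be issued through the flattened method, the request object method, or the callable; a sketch with placeholder project id and stream count:

    import com.google.api.core.ApiFuture;
    import com.google.cloud.bigquery.storage.v1beta2.BaseBigQueryReadClient;
    import com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest;
    import com.google.cloud.bigquery.storage.v1beta2.ReadSession;

    public class CallShapesSketch {
      public static void main(String[] args) throws Exception {
        try (BaseBigQueryReadClient client = BaseBigQueryReadClient.create()) {
          String parent = "projects/my-project";
          ReadSession readSession = ReadSession.newBuilder().build();

          // 1. Flattened method: request fields become parameters.
          ReadSession flattened = client.createReadSession(parent, readSession, 1);

          // 2. Request object method: build the request explicitly.
          CreateReadSessionRequest request =
              CreateReadSessionRequest.newBuilder()
                  .setParent(parent)
                  .setReadSession(readSession)
                  .setMaxStreamCount(1)
                  .build();
          ReadSession fromRequest = client.createReadSession(request);

          // 3. Callable method: returns an ApiFuture that can be awaited.
          ApiFuture<ReadSession> future = client.createReadSessionCallable().futureCall(request);
          ReadSession fromFuture = future.get();
        }
      }
    }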

Many parameters require resource names to be formatted in a particular way. To assist with + * these names, this class includes a format method for each type of name, and additionally a parse + * method to extract the individual identifiers contained within names that are returned. + * + *

This class can be customized by passing in a custom instance of BaseBigQueryReadSettings to + * create(). For example: + * + *

To customize credentials: + * + *

+ * 
+ * BaseBigQueryReadSettings baseBigQueryReadSettings =
+ *     BaseBigQueryReadSettings.newBuilder()
+ *         .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
+ *         .build();
+ * BaseBigQueryReadClient baseBigQueryReadClient =
+ *     BaseBigQueryReadClient.create(baseBigQueryReadSettings);
+ * 
+ * 
+ * + * To customize the endpoint: + * + *
+ * 
+ * BaseBigQueryReadSettings baseBigQueryReadSettings =
+ *     BaseBigQueryReadSettings.newBuilder().setEndpoint(myEndpoint).build();
+ * BaseBigQueryReadClient baseBigQueryReadClient =
+ *     BaseBigQueryReadClient.create(baseBigQueryReadSettings);
+ * 
+ * 
+ */ +@Generated("by gapic-generator") +@BetaApi +public class BaseBigQueryReadClient implements BackgroundResource { + private final BaseBigQueryReadSettings settings; + private final BigQueryReadStub stub; + + /** Constructs an instance of BaseBigQueryReadClient with default settings. */ + public static final BaseBigQueryReadClient create() throws IOException { + return create(BaseBigQueryReadSettings.newBuilder().build()); + } + + /** + * Constructs an instance of BaseBigQueryReadClient, using the given settings. The channels are + * created based on the settings passed in, or defaults for any settings that are not set. + */ + public static final BaseBigQueryReadClient create(BaseBigQueryReadSettings settings) + throws IOException { + return new BaseBigQueryReadClient(settings); + } + + /** + * Constructs an instance of BaseBigQueryReadClient, using the given stub for making calls. This + * is for advanced usage - prefer to use BaseBigQueryReadSettings}. + */ + @BetaApi("A restructuring of stub classes is planned, so this may break in the future") + public static final BaseBigQueryReadClient create(BigQueryReadStub stub) { + return new BaseBigQueryReadClient(stub); + } + + /** + * Constructs an instance of BaseBigQueryReadClient, using the given settings. This is protected + * so that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected BaseBigQueryReadClient(BaseBigQueryReadSettings settings) throws IOException { + this.settings = settings; + this.stub = ((BigQueryReadStubSettings) settings.getStubSettings()).createStub(); + } + + @BetaApi("A restructuring of stub classes is planned, so this may break in the future") + protected BaseBigQueryReadClient(BigQueryReadStub stub) { + this.settings = null; + this.stub = stub; + } + + public final BaseBigQueryReadSettings getSettings() { + return settings; + } + + @BetaApi("A restructuring of stub classes is planned, so this may break in the future") + public BigQueryReadStub getStub() { + return stub; + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Creates a new read session. A read session divides the contents of a BigQuery table into one or + * more streams, which can then be used to read data from the table. The read session also + * specifies properties of the data to be read, such as a list of columns or a push-down filter + * describing the rows to be returned. + * + *

A particular row can be read by at most one stream. When the caller has reached the end of + * each stream in the session, then all the data in the table has been read. + * + *

+   * Data is assigned to each stream such that roughly the same number of rows can be read from
+   * each stream. Because the server-side unit for assigning data is collections of rows, the API
+   * does not guarantee that each stream will return the same number of rows. Additionally, the
+   * limits are enforced based on the number of pre-filtered rows, so some filters can lead to
+   * lopsided assignments.
+   *

Read sessions automatically expire 24 hours after they are created and do not require manual + * clean-up by the caller. + * + *
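Putting the pieces together, each of a session's streams can be drained with the server-streaming readRows callable. A fragment, assuming the v1beta2 imports and the streams/name/read_stream fields defined by the stream.proto and storage.proto added in this PR, and eliding the session's table and data-format configuration:

    try (BaseBigQueryReadClient client = BaseBigQueryReadClient.create()) {
      ReadSession session =
          client.createReadSession(
              "projects/my-project", ReadSession.newBuilder().build(), /* maxStreamCount= */ 4);
      for (ReadStream readStream : session.getStreamsList()) {
        ReadRowsRequest request =
            ReadRowsRequest.newBuilder().setReadStream(readStream.getName()).build();
        for (ReadRowsResponse response : client.readRowsCallable().call(request)) {
          // Decode the Avro or Arrow payload carried by each response here.
        }
      }
    }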

Sample code: + * + *


+   * try (BaseBigQueryReadClient baseBigQueryReadClient = BaseBigQueryReadClient.create()) {
+   *   String parent = "";
+   *   ReadSession readSession = ReadSession.newBuilder().build();
+   *   int maxStreamCount = 0;
+   *   ReadSession response = baseBigQueryReadClient.createReadSession(parent, readSession, maxStreamCount);
+   * }
+   * 
+   *
+   * @param parent Required. The request project that owns the session, in the form of
+   *     `projects/{project_id}`.
+   * @param readSession Required. Session to be created.
+   * @param maxStreamCount Max initial number of streams. If unset or zero, the server will provide
+   *     a value of streams so as to produce reasonable throughput. Must be non-negative. The number
+   *     of streams may be lower than the requested number, depending on the amount of parallelism
+   *     that is reasonable for the table. An error will be returned if the max count is greater
+   *     than the current system max limit of 1,000.
+   *

Streams must be read starting from offset 0. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ReadSession createReadSession( + String parent, ReadSession readSession, int maxStreamCount) { + + CreateReadSessionRequest request = + CreateReadSessionRequest.newBuilder() + .setParent(parent) + .setReadSession(readSession) + .setMaxStreamCount(maxStreamCount) + .build(); + return createReadSession(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Creates a new read session. A read session divides the contents of a BigQuery table into one or + * more streams, which can then be used to read data from the table. The read session also + * specifies properties of the data to be read, such as a list of columns or a push-down filter + * describing the rows to be returned. + * + *

A particular row can be read by at most one stream. When the caller has reached the end of + * each stream in the session, then all the data in the table has been read. + * + *

+   * Data is assigned to each stream such that roughly the same number of rows can be read from
+   * each stream. Because the server-side unit for assigning data is collections of rows, the API
+   * does not guarantee that each stream will return the same number of rows. Additionally, the
+   * limits are enforced based on the number of pre-filtered rows, so some filters can lead to
+   * lopsided assignments.
+   *

Read sessions automatically expire 24 hours after they are created and do not require manual + * clean-up by the caller. + * + *

Sample code: + * + *


+   * try (BaseBigQueryReadClient baseBigQueryReadClient = BaseBigQueryReadClient.create()) {
+   *   CreateReadSessionRequest request = CreateReadSessionRequest.newBuilder().build();
+   *   ReadSession response = baseBigQueryReadClient.createReadSession(request);
+   * }
+   * 
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final ReadSession createReadSession(CreateReadSessionRequest request) { + return createReadSessionCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Creates a new read session. A read session divides the contents of a BigQuery table into one or + * more streams, which can then be used to read data from the table. The read session also + * specifies properties of the data to be read, such as a list of columns or a push-down filter + * describing the rows to be returned. + * + *

A particular row can be read by at most one stream. When the caller has reached the end of + * each stream in the session, then all the data in the table has been read. + * + *

+   * Data is assigned to each stream such that roughly the same number of rows can be read from
+   * each stream. Because the server-side unit for assigning data is collections of rows, the API
+   * does not guarantee that each stream will return the same number of rows. Additionally, the
+   * limits are enforced based on the number of pre-filtered rows, so some filters can lead to
+   * lopsided assignments.
+   *

Read sessions automatically expire 24 hours after they are created and do not require manual + * clean-up by the caller. + * + *

Sample code: + * + *


+   * try (BaseBigQueryReadClient baseBigQueryReadClient = BaseBigQueryReadClient.create()) {
+   *   CreateReadSessionRequest request = CreateReadSessionRequest.newBuilder().build();
+   *   ApiFuture<ReadSession> future = baseBigQueryReadClient.createReadSessionCallable().futureCall(request);
+   *   // Do something
+   *   ReadSession response = future.get();
+   * }
+   * 
+ */ + public final UnaryCallable createReadSessionCallable() { + return stub.createReadSessionCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Reads rows from the stream in the format prescribed by the ReadSession. Each response contains + * one or more table rows, up to a maximum of 100 MiB per response; read requests which attempt to + * read individual rows larger than 100 MiB will fail. + * + *

Each request also returns a set of stream statistics reflecting the current state of the + * stream. + * + *
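Because a stream is long-lived (the read_rows retry parameters defined later in this patch use 24-hour timeouts), a caller may want to track how far it has read so that an interrupted stream can be resumed. A fragment, assuming the read_stream/offset request fields and the row_count response field from the v1beta2 storage.proto in this PR, with `client`, `streamName`, and `process` standing in for caller-provided pieces:

    long offset = 0;
    ReadRowsRequest request =
        ReadRowsRequest.newBuilder().setReadStream(streamName).setOffset(offset).build();
    try {
      for (ReadRowsResponse response : client.readRowsCallable().call(request)) {
        process(response);                 // caller-provided row handling
        offset += response.getRowCount();  // rows consumed so far in this stream
      }
    } catch (ApiException e) {
      // Rebuild the request with setOffset(offset) and call again to resume.
    }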

Sample code: + * + *


+   * try (BaseBigQueryReadClient baseBigQueryReadClient = BaseBigQueryReadClient.create()) {
+   *   ReadRowsRequest request = ReadRowsRequest.newBuilder().build();
+   *
+   *   ServerStream<ReadRowsResponse> stream = baseBigQueryReadClient.readRowsCallable().call(request);
+   *   for (ReadRowsResponse response : stream) {
+   *     // Do something when receive a response
+   *   }
+   * }
+   * 
+ */ + public final ServerStreamingCallable readRowsCallable() { + return stub.readRowsCallable(); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Splits a given `ReadStream` into two `ReadStream` objects. These `ReadStream` objects are + * referred to as the primary and the residual streams of the split. The original `ReadStream` can + * still be read from in the same manner as before. Both of the returned `ReadStream` objects can + * also be read from, and the rows returned by both child streams will be the same as the rows + * read from the original stream. + * + *

+   * Moreover, the two child streams will be allocated back-to-back in the original `ReadStream`.
+   * Concretely, it is guaranteed that for streams original, primary, and residual,
+   * original[0-j] = primary[0-j] and original[j-n] = residual[0-m] once the streams have been read
+   * to completion.
+   *
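A fragment showing one way to drive the split, assuming the name/fraction request fields and the primary_stream/remainder_stream response fields from the v1beta2 storage.proto in this PR, with `client` and `readStream` provided by the caller:

    SplitReadStreamRequest request =
        SplitReadStreamRequest.newBuilder()
            .setName(readStream.getName())  // stream to split
            .setFraction(0.5)               // requested split point
            .build();
    SplitReadStreamResponse response = client.splitReadStream(request);
    ReadStream primary = response.getPrimaryStream();
    ReadStream remainder = response.getRemainderStream();
    // Both children can now be read with readRowsCallable(), exactly like the original stream.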

Sample code: + * + *


+   * try (BaseBigQueryReadClient baseBigQueryReadClient = BaseBigQueryReadClient.create()) {
+   *   SplitReadStreamRequest request = SplitReadStreamRequest.newBuilder().build();
+   *   SplitReadStreamResponse response = baseBigQueryReadClient.splitReadStream(request);
+   * }
+   * 
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final SplitReadStreamResponse splitReadStream(SplitReadStreamRequest request) { + return splitReadStreamCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD + /** + * Splits a given `ReadStream` into two `ReadStream` objects. These `ReadStream` objects are + * referred to as the primary and the residual streams of the split. The original `ReadStream` can + * still be read from in the same manner as before. Both of the returned `ReadStream` objects can + * also be read from, and the rows returned by both child streams will be the same as the rows + * read from the original stream. + * + *

+   * Moreover, the two child streams will be allocated back-to-back in the original `ReadStream`.
+   * Concretely, it is guaranteed that for streams original, primary, and residual,
+   * original[0-j] = primary[0-j] and original[j-n] = residual[0-m] once the streams have been read
+   * to completion.
+   *

Sample code: + * + *


+   * try (BaseBigQueryReadClient baseBigQueryReadClient = BaseBigQueryReadClient.create()) {
+   *   SplitReadStreamRequest request = SplitReadStreamRequest.newBuilder().build();
+   *   ApiFuture<SplitReadStreamResponse> future = baseBigQueryReadClient.splitReadStreamCallable().futureCall(request);
+   *   // Do something
+   *   SplitReadStreamResponse response = future.get();
+   * }
+   * 
+ */ + public final UnaryCallable + splitReadStreamCallable() { + return stub.splitReadStreamCallable(); + } + + @Override + public final void close() { + stub.close(); + } + + @Override + public void shutdown() { + stub.shutdown(); + } + + @Override + public boolean isShutdown() { + return stub.isShutdown(); + } + + @Override + public boolean isTerminated() { + return stub.isTerminated(); + } + + @Override + public void shutdownNow() { + stub.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return stub.awaitTermination(duration, unit); + } +} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadSettings.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadSettings.java new file mode 100644 index 0000000000..7eb780040a --- /dev/null +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadSettings.java @@ -0,0 +1,201 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1beta2; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.bigquery.storage.v1beta2.stub.BigQueryReadStubSettings; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +/** + * Settings class to configure an instance of {@link BaseBigQueryReadClient}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
  • The default service address (bigquerystorage.googleapis.com) and default port (443) are + * used. + *
  • Credentials are acquired automatically through Application Default Credentials. + *
  • Retries are configured for idempotent methods but not for non-idempotent methods. + *
+ * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. + * + *
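For instance, a per-method builder accepts a complete RetrySettings object; a sketch that raises the readRows timeouts, assuming the RetrySettings and org.threeten.bp.Duration types already used elsewhere in this module:

    BaseBigQueryReadSettings.Builder builder = BaseBigQueryReadSettings.newBuilder();
    builder
        .readRowsSettings()
        .setRetrySettings(
            RetrySettings.newBuilder()
                .setInitialRpcTimeout(Duration.ofHours(12))
                .setMaxRpcTimeout(Duration.ofHours(12))
                .setTotalTimeout(Duration.ofHours(12))
                .build());
    BaseBigQueryReadSettings settings = builder.build();
    BaseBigQueryReadClient client = BaseBigQueryReadClient.create(settings);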

For example, to set the total timeout of createReadSession to 30 seconds: + * + *

+ * 
+ * BaseBigQueryReadSettings.Builder baseBigQueryReadSettingsBuilder =
+ *     BaseBigQueryReadSettings.newBuilder();
+ * baseBigQueryReadSettingsBuilder.createReadSessionSettings().getRetrySettings().toBuilder()
+ *     .setTotalTimeout(Duration.ofSeconds(30));
+ * BaseBigQueryReadSettings baseBigQueryReadSettings = baseBigQueryReadSettingsBuilder.build();
+ * 
+ * 
+ */ +@Generated("by gapic-generator") +@BetaApi +public class BaseBigQueryReadSettings extends ClientSettings { + /** Returns the object with the settings used for calls to createReadSession. */ + public UnaryCallSettings createReadSessionSettings() { + return ((BigQueryReadStubSettings) getStubSettings()).createReadSessionSettings(); + } + + /** Returns the object with the settings used for calls to readRows. */ + public ServerStreamingCallSettings readRowsSettings() { + return ((BigQueryReadStubSettings) getStubSettings()).readRowsSettings(); + } + + /** Returns the object with the settings used for calls to splitReadStream. */ + public UnaryCallSettings + splitReadStreamSettings() { + return ((BigQueryReadStubSettings) getStubSettings()).splitReadStreamSettings(); + } + + public static final BaseBigQueryReadSettings create(BigQueryReadStubSettings stub) + throws IOException { + return new BaseBigQueryReadSettings.Builder(stub.toBuilder()).build(); + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return BigQueryReadStubSettings.defaultExecutorProviderBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return BigQueryReadStubSettings.getDefaultEndpoint(); + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return BigQueryReadStubSettings.getDefaultServiceScopes(); + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return BigQueryReadStubSettings.defaultCredentialsProviderBuilder(); + } + + /** Returns a builder for the default ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return BigQueryReadStubSettings.defaultGrpcTransportProviderBuilder(); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return BigQueryReadStubSettings.defaultTransportChannelProvider(); + } + + @BetaApi("The surface for customizing headers is not stable yet and may change in the future.") + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return BigQueryReadStubSettings.defaultApiClientHeaderProviderBuilder(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected BaseBigQueryReadSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + } + + /** Builder for BaseBigQueryReadSettings. 
*/ + public static class Builder extends ClientSettings.Builder { + protected Builder() throws IOException { + this((ClientContext) null); + } + + protected Builder(ClientContext clientContext) { + super(BigQueryReadStubSettings.newBuilder(clientContext)); + } + + private static Builder createDefault() { + return new Builder(BigQueryReadStubSettings.newBuilder()); + } + + protected Builder(BaseBigQueryReadSettings settings) { + super(settings.getStubSettings().toBuilder()); + } + + protected Builder(BigQueryReadStubSettings.Builder stubSettings) { + super(stubSettings); + } + + public BigQueryReadStubSettings.Builder getStubSettingsBuilder() { + return ((BigQueryReadStubSettings.Builder) getStubSettings()); + } + + // NEXT_MAJOR_VER: remove 'throws Exception' + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) throws Exception { + super.applyToAllUnaryMethods( + getStubSettingsBuilder().unaryMethodSettingsBuilders(), settingsUpdater); + return this; + } + + /** Returns the builder for the settings used for calls to createReadSession. */ + public UnaryCallSettings.Builder + createReadSessionSettings() { + return getStubSettingsBuilder().createReadSessionSettings(); + } + + /** Returns the builder for the settings used for calls to readRows. */ + public ServerStreamingCallSettings.Builder + readRowsSettings() { + return getStubSettingsBuilder().readRowsSettings(); + } + + /** Returns the builder for the settings used for calls to splitReadStream. */ + public UnaryCallSettings.Builder + splitReadStreamSettings() { + return getStubSettingsBuilder().splitReadStreamSettings(); + } + + @Override + public BaseBigQueryReadSettings build() throws IOException { + return new BaseBigQueryReadSettings(this); + } + } +} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/package-info.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/package-info.java new file mode 100644 index 0000000000..3796e89f19 --- /dev/null +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/package-info.java @@ -0,0 +1,44 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * A client to BigQuery Storage API. + * + *

The interfaces provided are listed below, along with usage samples. + * + *

====================== BaseBigQueryReadClient ====================== + * + *

Service Description: BigQuery Read API. + * + *

The Read API can be used to read data from BigQuery. + * + *

Sample for BaseBigQueryReadClient: + * + *

+ * 
+ * try (BaseBigQueryReadClient baseBigQueryReadClient = BaseBigQueryReadClient.create()) {
+ *   String parent = "";
+ *   ReadSession readSession = ReadSession.newBuilder().build();
+ *   int maxStreamCount = 0;
+ *   ReadSession response = baseBigQueryReadClient.createReadSession(parent, readSession, maxStreamCount);
+ * }
+ * 
+ * 
+ */ +@Generated("by gapic-generator") +package com.google.cloud.bigquery.storage.v1beta2; + +import javax.annotation.Generated; diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryReadStub.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryReadStub.java new file mode 100644 index 0000000000..53b3e4aad2 --- /dev/null +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryReadStub.java @@ -0,0 +1,54 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1beta2.stub; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1beta2.ReadSession; +import com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest; +import com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +/** + * Base stub class for BigQuery Storage API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator") +@BetaApi("A restructuring of stub classes is planned, so this may break in the future") +public abstract class BigQueryReadStub implements BackgroundResource { + + public UnaryCallable createReadSessionCallable() { + throw new UnsupportedOperationException("Not implemented: createReadSessionCallable()"); + } + + public ServerStreamingCallable readRowsCallable() { + throw new UnsupportedOperationException("Not implemented: readRowsCallable()"); + } + + public UnaryCallable splitReadStreamCallable() { + throw new UnsupportedOperationException("Not implemented: splitReadStreamCallable()"); + } + + @Override + public abstract void close(); +} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryReadStubSettings.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryReadStubSettings.java new file mode 100644 index 0000000000..c0d0dcaab0 --- /dev/null +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/BigQueryReadStubSettings.java @@ -0,0 +1,353 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigquery.storage.v1beta2.stub; + +import com.google.api.core.ApiFunction; +import com.google.api.core.BetaApi; +import com.google.api.gax.core.GaxProperties; +import com.google.api.gax.core.GoogleCredentialsProvider; +import com.google.api.gax.core.InstantiatingExecutorProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.GrpcTransportChannel; +import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.StubSettings; +import com.google.api.gax.rpc.TransportChannelProvider; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1beta2.ReadSession; +import com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest; +import com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; +import java.io.IOException; +import java.util.List; +import javax.annotation.Generated; +import org.threeten.bp.Duration; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +/** + * Settings class to configure an instance of {@link BigQueryReadStub}. + * + *

The default instance has everything set to sensible defaults: + * + *

    + *
  • The default service address (bigquerystorage.googleapis.com) and default port (443) are + * used. + *
  • Credentials are acquired automatically through Application Default Credentials. + *
  • Retries are configured for idempotent methods but not for non-idempotent methods. + *
+ * + *

The builder of this class is recursive, so contained classes are themselves builders. When + * build() is called, the tree of builders is called to create the complete settings object. + * + *

For example, to set the total timeout of createReadSession to 30 seconds: + * + *

+ * 
+ * BigQueryReadStubSettings.Builder baseBigQueryReadSettingsBuilder =
+ *     BigQueryReadStubSettings.newBuilder();
+ * baseBigQueryReadSettingsBuilder.createReadSessionSettings().getRetrySettings().toBuilder()
+ *     .setTotalTimeout(Duration.ofSeconds(30));
+ * BigQueryReadStubSettings baseBigQueryReadSettings = baseBigQueryReadSettingsBuilder.build();
+ * 
+ * 
+ */ +@Generated("by gapic-generator") +@BetaApi +public class BigQueryReadStubSettings extends StubSettings { + /** The default scopes of the service. */ + private static final ImmutableList DEFAULT_SERVICE_SCOPES = + ImmutableList.builder() + .add("https://www.googleapis.com/auth/bigquery") + .add("https://www.googleapis.com/auth/bigquery.readonly") + .add("https://www.googleapis.com/auth/cloud-platform") + .build(); + + private final UnaryCallSettings createReadSessionSettings; + private final ServerStreamingCallSettings readRowsSettings; + private final UnaryCallSettings + splitReadStreamSettings; + + /** Returns the object with the settings used for calls to createReadSession. */ + public UnaryCallSettings createReadSessionSettings() { + return createReadSessionSettings; + } + + /** Returns the object with the settings used for calls to readRows. */ + public ServerStreamingCallSettings readRowsSettings() { + return readRowsSettings; + } + + /** Returns the object with the settings used for calls to splitReadStream. */ + public UnaryCallSettings + splitReadStreamSettings() { + return splitReadStreamSettings; + } + + @BetaApi("A restructuring of stub classes is planned, so this may break in the future") + public BigQueryReadStub createStub() throws IOException { + if (getTransportChannelProvider() + .getTransportName() + .equals(GrpcTransportChannel.getGrpcTransportName())) { + return GrpcBigQueryReadStub.create(this); + } else { + throw new UnsupportedOperationException( + "Transport not supported: " + getTransportChannelProvider().getTransportName()); + } + } + + /** Returns a builder for the default ExecutorProvider for this service. */ + public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { + return InstantiatingExecutorProvider.newBuilder(); + } + + /** Returns the default service endpoint. */ + public static String getDefaultEndpoint() { + return "bigquerystorage.googleapis.com:443"; + } + + /** Returns the default service scopes. */ + public static List getDefaultServiceScopes() { + return DEFAULT_SERVICE_SCOPES; + } + + /** Returns a builder for the default credentials for this service. */ + public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { + return GoogleCredentialsProvider.newBuilder().setScopesToApply(DEFAULT_SERVICE_SCOPES); + } + + /** Returns a builder for the default ChannelProvider for this service. */ + public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { + return InstantiatingGrpcChannelProvider.newBuilder() + .setMaxInboundMessageSize(Integer.MAX_VALUE); + } + + public static TransportChannelProvider defaultTransportChannelProvider() { + return defaultGrpcTransportProviderBuilder().build(); + } + + @BetaApi("The surface for customizing headers is not stable yet and may change in the future.") + public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { + return ApiClientHeaderProvider.newBuilder() + .setGeneratedLibToken( + "gapic", GaxProperties.getLibraryVersion(BigQueryReadStubSettings.class)) + .setTransportToken( + GaxGrpcProperties.getGrpcTokenName(), GaxGrpcProperties.getGrpcVersion()); + } + + /** Returns a new builder for this class. */ + public static Builder newBuilder() { + return Builder.createDefault(); + } + + /** Returns a new builder for this class. 
*/ + public static Builder newBuilder(ClientContext clientContext) { + return new Builder(clientContext); + } + + /** Returns a builder containing all the values of this settings class. */ + public Builder toBuilder() { + return new Builder(this); + } + + protected BigQueryReadStubSettings(Builder settingsBuilder) throws IOException { + super(settingsBuilder); + + createReadSessionSettings = settingsBuilder.createReadSessionSettings().build(); + readRowsSettings = settingsBuilder.readRowsSettings().build(); + splitReadStreamSettings = settingsBuilder.splitReadStreamSettings().build(); + } + + /** Builder for BigQueryReadStubSettings. */ + public static class Builder extends StubSettings.Builder { + private final ImmutableList> unaryMethodSettingsBuilders; + + private final UnaryCallSettings.Builder + createReadSessionSettings; + private final ServerStreamingCallSettings.Builder + readRowsSettings; + private final UnaryCallSettings.Builder + splitReadStreamSettings; + + private static final ImmutableMap> + RETRYABLE_CODE_DEFINITIONS; + + static { + ImmutableMap.Builder> definitions = + ImmutableMap.builder(); + definitions.put( + "idempotent", + ImmutableSet.copyOf( + Lists.newArrayList( + StatusCode.Code.DEADLINE_EXCEEDED, StatusCode.Code.UNAVAILABLE))); + definitions.put( + "unary_streaming", + ImmutableSet.copyOf(Lists.newArrayList(StatusCode.Code.UNAVAILABLE))); + definitions.put("non_idempotent", ImmutableSet.copyOf(Lists.newArrayList())); + RETRYABLE_CODE_DEFINITIONS = definitions.build(); + } + + private static final ImmutableMap RETRY_PARAM_DEFINITIONS; + + static { + ImmutableMap.Builder definitions = ImmutableMap.builder(); + RetrySettings settings = null; + settings = + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(100L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelay(Duration.ofMillis(60000L)) + .setInitialRpcTimeout(Duration.ofMillis(20000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeout(Duration.ofMillis(20000L)) + .setTotalTimeout(Duration.ofMillis(600000L)) + .build(); + definitions.put("default", settings); + settings = + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(100L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelay(Duration.ofMillis(60000L)) + .setInitialRpcTimeout(Duration.ofMillis(120000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeout(Duration.ofMillis(120000L)) + .setTotalTimeout(Duration.ofMillis(600000L)) + .build(); + definitions.put("create_read_session", settings); + settings = + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(100L)) + .setRetryDelayMultiplier(1.3) + .setMaxRetryDelay(Duration.ofMillis(60000L)) + .setInitialRpcTimeout(Duration.ofMillis(86400000L)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeout(Duration.ofMillis(86400000L)) + .setTotalTimeout(Duration.ofMillis(86400000L)) + .build(); + definitions.put("read_rows", settings); + RETRY_PARAM_DEFINITIONS = definitions.build(); + } + + protected Builder() { + this((ClientContext) null); + } + + protected Builder(ClientContext clientContext) { + super(clientContext); + + createReadSessionSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + readRowsSettings = ServerStreamingCallSettings.newBuilder(); + + splitReadStreamSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createReadSessionSettings, splitReadStreamSettings); + + initDefaults(this); + } + + private static Builder createDefault() { + Builder builder = new 
Builder((ClientContext) null); + builder.setTransportChannelProvider(defaultTransportChannelProvider()); + builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); + builder.setInternalHeaderProvider(defaultApiClientHeaderProviderBuilder().build()); + builder.setEndpoint(getDefaultEndpoint()); + return initDefaults(builder); + } + + private static Builder initDefaults(Builder builder) { + + builder + .createReadSessionSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("create_read_session")); + + builder + .readRowsSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("unary_streaming")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("read_rows")); + + builder + .splitReadStreamSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("idempotent")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("default")); + + return builder; + } + + protected Builder(BigQueryReadStubSettings settings) { + super(settings); + + createReadSessionSettings = settings.createReadSessionSettings.toBuilder(); + readRowsSettings = settings.readRowsSettings.toBuilder(); + splitReadStreamSettings = settings.splitReadStreamSettings.toBuilder(); + + unaryMethodSettingsBuilders = + ImmutableList.>of( + createReadSessionSettings, splitReadStreamSettings); + } + + // NEXT_MAJOR_VER: remove 'throws Exception' + /** + * Applies the given settings updater function to all of the unary API methods in this service. + * + *
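+     * A hedged sketch (the gax UnaryCallSettings.Builder accessors and the
+     * threetenbp Duration helpers are assumptions, not shown in this class) of
+     * stretching the total timeout of every unary method at once:
+     *
+     *   BigQueryReadStubSettings.Builder builder = BigQueryReadStubSettings.newBuilder();
+     *   builder.applyToAllUnaryMethods(
+     *       callSettings -> {
+     *         callSettings.setRetrySettings(
+     *             callSettings.getRetrySettings().toBuilder()
+     *                 .setTotalTimeout(Duration.ofMinutes(30))
+     *                 .build());
+     *         return null;
+     *       });
+     *   BigQueryReadStubSettings settings = builder.build();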

Note: This method does not support applying settings to streaming methods. + */ + public Builder applyToAllUnaryMethods( + ApiFunction, Void> settingsUpdater) throws Exception { + super.applyToAllUnaryMethods(unaryMethodSettingsBuilders, settingsUpdater); + return this; + } + + public ImmutableList> unaryMethodSettingsBuilders() { + return unaryMethodSettingsBuilders; + } + + /** Returns the builder for the settings used for calls to createReadSession. */ + public UnaryCallSettings.Builder + createReadSessionSettings() { + return createReadSessionSettings; + } + + /** Returns the builder for the settings used for calls to readRows. */ + public ServerStreamingCallSettings.Builder + readRowsSettings() { + return readRowsSettings; + } + + /** Returns the builder for the settings used for calls to splitReadStream. */ + public UnaryCallSettings.Builder + splitReadStreamSettings() { + return splitReadStreamSettings; + } + + @Override + public BigQueryReadStubSettings build() throws IOException { + return new BigQueryReadStubSettings(this); + } + } +} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryReadCallableFactory.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryReadCallableFactory.java new file mode 100644 index 0000000000..1b9c450f4c --- /dev/null +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryReadCallableFactory.java @@ -0,0 +1,115 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1beta2.stub; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcCallableFactory; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.BatchingCallSettings; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.ClientStreamingCallable; +import com.google.api.gax.rpc.OperationCallSettings; +import com.google.api.gax.rpc.OperationCallable; +import com.google.api.gax.rpc.PagedCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StreamingCallSettings; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.longrunning.stub.OperationsStub; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +/** + * gRPC callable factory implementation for BigQuery Storage API. + * + *

This class is for advanced usage. + */ +@Generated("by gapic-generator") +@BetaApi("The surface for use by generated code is not stable yet and may change in the future.") +public class GrpcBigQueryReadCallableFactory implements GrpcStubCallableFactory { + @Override + public UnaryCallable createUnaryCallable( + GrpcCallSettings grpcCallSettings, + UnaryCallSettings callSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createUnaryCallable(grpcCallSettings, callSettings, clientContext); + } + + @Override + public + UnaryCallable createPagedCallable( + GrpcCallSettings grpcCallSettings, + PagedCallSettings pagedCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createPagedCallable( + grpcCallSettings, pagedCallSettings, clientContext); + } + + @Override + public UnaryCallable createBatchingCallable( + GrpcCallSettings grpcCallSettings, + BatchingCallSettings batchingCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBatchingCallable( + grpcCallSettings, batchingCallSettings, clientContext); + } + + @BetaApi( + "The surface for long-running operations is not stable yet and may change in the future.") + @Override + public + OperationCallable createOperationCallable( + GrpcCallSettings grpcCallSettings, + OperationCallSettings operationCallSettings, + ClientContext clientContext, + OperationsStub operationsStub) { + return GrpcCallableFactory.createOperationCallable( + grpcCallSettings, operationCallSettings, clientContext, operationsStub); + } + + @Override + public + BidiStreamingCallable createBidiStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings streamingCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createBidiStreamingCallable( + grpcCallSettings, streamingCallSettings, clientContext); + } + + @Override + public + ServerStreamingCallable createServerStreamingCallable( + GrpcCallSettings grpcCallSettings, + ServerStreamingCallSettings streamingCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createServerStreamingCallable( + grpcCallSettings, streamingCallSettings, clientContext); + } + + @Override + public + ClientStreamingCallable createClientStreamingCallable( + GrpcCallSettings grpcCallSettings, + StreamingCallSettings streamingCallSettings, + ClientContext clientContext) { + return GrpcCallableFactory.createClientStreamingCallable( + grpcCallSettings, streamingCallSettings, clientContext); + } +} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryReadStub.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryReadStub.java new file mode 100644 index 0000000000..568d2ce688 --- /dev/null +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryReadStub.java @@ -0,0 +1,225 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1beta2.stub; + +import com.google.api.core.BetaApi; +import com.google.api.gax.core.BackgroundResource; +import com.google.api.gax.core.BackgroundResourceAggregation; +import com.google.api.gax.grpc.GrpcCallSettings; +import com.google.api.gax.grpc.GrpcStubCallableFactory; +import com.google.api.gax.rpc.ClientContext; +import com.google.api.gax.rpc.RequestParamsExtractor; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest; +import com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse; +import com.google.cloud.bigquery.storage.v1beta2.ReadSession; +import com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest; +import com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse; +import com.google.common.collect.ImmutableMap; +import io.grpc.MethodDescriptor; +import io.grpc.protobuf.ProtoUtils; +import java.io.IOException; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import javax.annotation.Generated; + +// AUTO-GENERATED DOCUMENTATION AND CLASS +/** + * gRPC stub implementation for BigQuery Storage API. + * + *

This class is for advanced usage and reflects the underlying API directly. + */ +@Generated("by gapic-generator") +@BetaApi("A restructuring of stub classes is planned, so this may break in the future") +public class GrpcBigQueryReadStub extends BigQueryReadStub { + + private static final MethodDescriptor + createReadSessionMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.bigquery.storage.v1beta2.BigQueryRead/CreateReadSession") + .setRequestMarshaller( + ProtoUtils.marshaller(CreateReadSessionRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(ReadSession.getDefaultInstance())) + .build(); + private static final MethodDescriptor + readRowsMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.SERVER_STREAMING) + .setFullMethodName("google.cloud.bigquery.storage.v1beta2.BigQueryRead/ReadRows") + .setRequestMarshaller(ProtoUtils.marshaller(ReadRowsRequest.getDefaultInstance())) + .setResponseMarshaller(ProtoUtils.marshaller(ReadRowsResponse.getDefaultInstance())) + .build(); + private static final MethodDescriptor + splitReadStreamMethodDescriptor = + MethodDescriptor.newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.cloud.bigquery.storage.v1beta2.BigQueryRead/SplitReadStream") + .setRequestMarshaller( + ProtoUtils.marshaller(SplitReadStreamRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(SplitReadStreamResponse.getDefaultInstance())) + .build(); + + private final BackgroundResource backgroundResources; + + private final UnaryCallable createReadSessionCallable; + private final ServerStreamingCallable readRowsCallable; + private final UnaryCallable + splitReadStreamCallable; + + private final GrpcStubCallableFactory callableFactory; + + public static final GrpcBigQueryReadStub create(BigQueryReadStubSettings settings) + throws IOException { + return new GrpcBigQueryReadStub(settings, ClientContext.create(settings)); + } + + public static final GrpcBigQueryReadStub create(ClientContext clientContext) throws IOException { + return new GrpcBigQueryReadStub(BigQueryReadStubSettings.newBuilder().build(), clientContext); + } + + public static final GrpcBigQueryReadStub create( + ClientContext clientContext, GrpcStubCallableFactory callableFactory) throws IOException { + return new GrpcBigQueryReadStub( + BigQueryReadStubSettings.newBuilder().build(), clientContext, callableFactory); + } + + /** + * Constructs an instance of GrpcBigQueryReadStub, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. + */ + protected GrpcBigQueryReadStub(BigQueryReadStubSettings settings, ClientContext clientContext) + throws IOException { + this(settings, clientContext, new GrpcBigQueryReadCallableFactory()); + } + + /** + * Constructs an instance of GrpcBigQueryReadStub, using the given settings. This is protected so + * that it is easy to make a subclass, but otherwise, the static factory methods should be + * preferred. 
+ */ + protected GrpcBigQueryReadStub( + BigQueryReadStubSettings settings, + ClientContext clientContext, + GrpcStubCallableFactory callableFactory) + throws IOException { + this.callableFactory = callableFactory; + + GrpcCallSettings createReadSessionTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(createReadSessionMethodDescriptor) + .setParamsExtractor( + new RequestParamsExtractor() { + @Override + public Map extract(CreateReadSessionRequest request) { + ImmutableMap.Builder params = ImmutableMap.builder(); + params.put( + "read_session.table", String.valueOf(request.getReadSession().getTable())); + return params.build(); + } + }) + .build(); + GrpcCallSettings readRowsTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(readRowsMethodDescriptor) + .setParamsExtractor( + new RequestParamsExtractor() { + @Override + public Map extract(ReadRowsRequest request) { + ImmutableMap.Builder params = ImmutableMap.builder(); + params.put("read_stream", String.valueOf(request.getReadStream())); + return params.build(); + } + }) + .build(); + GrpcCallSettings + splitReadStreamTransportSettings = + GrpcCallSettings.newBuilder() + .setMethodDescriptor(splitReadStreamMethodDescriptor) + .setParamsExtractor( + new RequestParamsExtractor() { + @Override + public Map extract(SplitReadStreamRequest request) { + ImmutableMap.Builder params = ImmutableMap.builder(); + params.put("name", String.valueOf(request.getName())); + return params.build(); + } + }) + .build(); + + this.createReadSessionCallable = + callableFactory.createUnaryCallable( + createReadSessionTransportSettings, + settings.createReadSessionSettings(), + clientContext); + this.readRowsCallable = + callableFactory.createServerStreamingCallable( + readRowsTransportSettings, settings.readRowsSettings(), clientContext); + this.splitReadStreamCallable = + callableFactory.createUnaryCallable( + splitReadStreamTransportSettings, settings.splitReadStreamSettings(), clientContext); + + backgroundResources = new BackgroundResourceAggregation(clientContext.getBackgroundResources()); + } + + public UnaryCallable createReadSessionCallable() { + return createReadSessionCallable; + } + + public ServerStreamingCallable readRowsCallable() { + return readRowsCallable; + } + + public UnaryCallable splitReadStreamCallable() { + return splitReadStreamCallable; + } + + @Override + public final void close() { + shutdown(); + } + + @Override + public void shutdown() { + backgroundResources.shutdown(); + } + + @Override + public boolean isShutdown() { + return backgroundResources.isShutdown(); + } + + @Override + public boolean isTerminated() { + return backgroundResources.isTerminated(); + } + + @Override + public void shutdownNow() { + backgroundResources.shutdownNow(); + } + + @Override + public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { + return backgroundResources.awaitTermination(duration, unit); + } +} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClientTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClientTest.java new file mode 100644 index 0000000000..d44de19e0e --- /dev/null +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClientTest.java @@ -0,0 +1,129 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use 
this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1alpha2; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.api.gax.grpc.testing.MockServiceHelper; +import com.google.api.gax.grpc.testing.MockStreamObserver; +import com.google.api.gax.rpc.ApiStreamObserver; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.api.gax.rpc.StatusCode; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +@javax.annotation.Generated("by GAPIC") +public class BigQueryWriteClientTest { + private static MockBigQueryWrite mockBigQueryWrite; + private static MockServiceHelper serviceHelper; + private BigQueryWriteClient client; + private LocalChannelProvider channelProvider; + + @BeforeClass + public static void startStaticServer() { + mockBigQueryWrite = new MockBigQueryWrite(); + serviceHelper = + new MockServiceHelper( + UUID.randomUUID().toString(), Arrays.asList(mockBigQueryWrite)); + serviceHelper.start(); + } + + @AfterClass + public static void stopServer() { + serviceHelper.stop(); + } + + @Before + public void setUp() throws IOException { + serviceHelper.reset(); + channelProvider = serviceHelper.createChannelProvider(); + BigQueryWriteSettings settings = + BigQueryWriteSettings.newBuilder() + .setTransportChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build(); + client = BigQueryWriteClient.create(settings); + } + + @After + public void tearDown() throws Exception { + client.close(); + } + + @Test + @SuppressWarnings("all") + public void appendRowsTest() throws Exception { + long offset = 1019779949L; + AppendRowsResponse expectedResponse = AppendRowsResponse.newBuilder().setOffset(offset).build(); + mockBigQueryWrite.addResponse(expectedResponse); + AppendRowsRequest request = AppendRowsRequest.newBuilder().build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + BidiStreamingCallable callable = + client.appendRowsCallable(); + ApiStreamObserver requestObserver = + callable.bidiStreamingCall(responseObserver); + + requestObserver.onNext(request); + requestObserver.onCompleted(); + + List actualResponses = responseObserver.future().get(); + Assert.assertEquals(1, actualResponses.size()); + Assert.assertEquals(expectedResponse, actualResponses.get(0)); + } + + @Test + @SuppressWarnings("all") + public void appendRowsExceptionTest() throws Exception { + StatusRuntimeException 
exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); + mockBigQueryWrite.addException(exception); + AppendRowsRequest request = AppendRowsRequest.newBuilder().build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + BidiStreamingCallable callable = + client.appendRowsCallable(); + ApiStreamObserver requestObserver = + callable.bidiStreamingCall(responseObserver); + + requestObserver.onNext(request); + + try { + List actualResponses = responseObserver.future().get(); + Assert.fail("No exception thrown"); + } catch (ExecutionException e) { + Assert.assertTrue(e.getCause() instanceof InvalidArgumentException); + InvalidArgumentException apiException = (InvalidArgumentException) e.getCause(); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } +} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/MockBigQueryWrite.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/MockBigQueryWrite.java new file mode 100644 index 0000000000..59f43a891b --- /dev/null +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/MockBigQueryWrite.java @@ -0,0 +1,57 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1alpha2; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.protobuf.AbstractMessage; +import io.grpc.ServerServiceDefinition; +import java.util.List; + +@javax.annotation.Generated("by GAPIC") +@BetaApi +public class MockBigQueryWrite implements MockGrpcService { + private final MockBigQueryWriteImpl serviceImpl; + + public MockBigQueryWrite() { + serviceImpl = new MockBigQueryWriteImpl(); + } + + @Override + public List getRequests() { + return serviceImpl.getRequests(); + } + + @Override + public void addResponse(AbstractMessage response) { + serviceImpl.addResponse(response); + } + + @Override + public void addException(Exception exception) { + serviceImpl.addException(exception); + } + + @Override + public ServerServiceDefinition getServiceDefinition() { + return serviceImpl.bindService(); + } + + @Override + public void reset() { + serviceImpl.reset(); + } +} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/MockBigQueryWriteImpl.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/MockBigQueryWriteImpl.java new file mode 100644 index 0000000000..d6efc1c50e --- /dev/null +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/MockBigQueryWriteImpl.java @@ -0,0 +1,159 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1alpha2; + +import com.google.api.core.BetaApi; +import com.google.cloud.bigquery.storage.v1alpha2.BigQueryWriteGrpc.BigQueryWriteImplBase; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse; +import com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream; +import com.google.protobuf.AbstractMessage; +import io.grpc.stub.StreamObserver; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Queue; + +@javax.annotation.Generated("by GAPIC") +@BetaApi +public class MockBigQueryWriteImpl extends BigQueryWriteImplBase { + private List requests; + private Queue responses; + + public MockBigQueryWriteImpl() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + public List getRequests() { + return requests; + } + + public void addResponse(AbstractMessage response) { + responses.add(response); + } + + public void setResponses(List responses) { + this.responses = new LinkedList(responses); + } + + public void addException(Exception exception) { + responses.add(exception); + } + + public void reset() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + @Override + public void createWriteStream( + CreateWriteStreamRequest request, StreamObserver responseObserver) { + Object response = responses.remove(); + if (response instanceof WriteStream) { + requests.add(request); + responseObserver.onNext((WriteStream) response); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError((Exception) response); + } else { + responseObserver.onError(new IllegalArgumentException("Unrecognized response type")); + } + } + + @Override + public StreamObserver appendRows( + final StreamObserver responseObserver) { + final Object response = responses.remove(); + StreamObserver requestObserver = + new StreamObserver() { + @Override + public void onNext(AppendRowsRequest value) { + if (response instanceof AppendRowsResponse) { + responseObserver.onNext((AppendRowsResponse) response); + } else if (response instanceof Exception) { + responseObserver.onError((Exception) response); + } else { + responseObserver.onError(new IllegalArgumentException("Unrecognized response type")); + } + } + + @Override + public void onError(Throwable t) { + responseObserver.onError(t); + } + + @Override + public void onCompleted() { + responseObserver.onCompleted(); + } + }; + return requestObserver; + } + + @Override 
+ public void getWriteStream( + GetWriteStreamRequest request, StreamObserver responseObserver) { + Object response = responses.remove(); + if (response instanceof WriteStream) { + requests.add(request); + responseObserver.onNext((WriteStream) response); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError((Exception) response); + } else { + responseObserver.onError(new IllegalArgumentException("Unrecognized response type")); + } + } + + @Override + public void finalizeWriteStream( + FinalizeWriteStreamRequest request, + StreamObserver responseObserver) { + Object response = responses.remove(); + if (response instanceof FinalizeWriteStreamResponse) { + requests.add(request); + responseObserver.onNext((FinalizeWriteStreamResponse) response); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError((Exception) response); + } else { + responseObserver.onError(new IllegalArgumentException("Unrecognized response type")); + } + } + + @Override + public void batchCommitWriteStreams( + BatchCommitWriteStreamsRequest request, + StreamObserver responseObserver) { + Object response = responses.remove(); + if (response instanceof BatchCommitWriteStreamsResponse) { + requests.add(request); + responseObserver.onNext((BatchCommitWriteStreamsResponse) response); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError((Exception) response); + } else { + responseObserver.onError(new IllegalArgumentException("Unrecognized response type")); + } + } +} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadClientTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadClientTest.java new file mode 100644 index 0000000000..8adb07154f --- /dev/null +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadClientTest.java @@ -0,0 +1,166 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigquery.storage.v1beta2; + +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.grpc.testing.LocalChannelProvider; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.api.gax.grpc.testing.MockServiceHelper; +import com.google.api.gax.grpc.testing.MockStreamObserver; +import com.google.api.gax.rpc.ApiClientHeaderProvider; +import com.google.api.gax.rpc.InvalidArgumentException; +import com.google.api.gax.rpc.ServerStreamingCallable; +import com.google.api.gax.rpc.StatusCode; +import com.google.protobuf.AbstractMessage; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +@javax.annotation.Generated("by GAPIC") +public class BaseBigQueryReadClientTest { + private static MockBigQueryRead mockBigQueryRead; + private static MockServiceHelper serviceHelper; + private BaseBigQueryReadClient client; + private LocalChannelProvider channelProvider; + + @BeforeClass + public static void startStaticServer() { + mockBigQueryRead = new MockBigQueryRead(); + serviceHelper = + new MockServiceHelper( + UUID.randomUUID().toString(), Arrays.asList(mockBigQueryRead)); + serviceHelper.start(); + } + + @AfterClass + public static void stopServer() { + serviceHelper.stop(); + } + + @Before + public void setUp() throws IOException { + serviceHelper.reset(); + channelProvider = serviceHelper.createChannelProvider(); + BaseBigQueryReadSettings settings = + BaseBigQueryReadSettings.newBuilder() + .setTransportChannelProvider(channelProvider) + .setCredentialsProvider(NoCredentialsProvider.create()) + .build(); + client = BaseBigQueryReadClient.create(settings); + } + + @After + public void tearDown() throws Exception { + client.close(); + } + + @Test + @SuppressWarnings("all") + public void createReadSessionTest() { + String name = "name3373707"; + String table = "table110115790"; + ReadSession expectedResponse = ReadSession.newBuilder().setName(name).setTable(table).build(); + mockBigQueryRead.addResponse(expectedResponse); + + String parent = "parent-995424086"; + ReadSession readSession = ReadSession.newBuilder().build(); + int maxStreamCount = 940837515; + + ReadSession actualResponse = client.createReadSession(parent, readSession, maxStreamCount); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockBigQueryRead.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + CreateReadSessionRequest actualRequest = (CreateReadSessionRequest) actualRequests.get(0); + + Assert.assertEquals(parent, actualRequest.getParent()); + Assert.assertEquals(readSession, actualRequest.getReadSession()); + Assert.assertEquals(maxStreamCount, actualRequest.getMaxStreamCount()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + @SuppressWarnings("all") + public void createReadSessionExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); + mockBigQueryRead.addException(exception); + + try { + String parent = "parent-995424086"; + 
ReadSession readSession = ReadSession.newBuilder().build(); + int maxStreamCount = 940837515; + + client.createReadSession(parent, readSession, maxStreamCount); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception + } + } + + @Test + @SuppressWarnings("all") + public void readRowsTest() throws Exception { + long rowCount = 1340416618L; + ReadRowsResponse expectedResponse = ReadRowsResponse.newBuilder().setRowCount(rowCount).build(); + mockBigQueryRead.addResponse(expectedResponse); + ReadRowsRequest request = ReadRowsRequest.newBuilder().build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = client.readRowsCallable(); + callable.serverStreamingCall(request, responseObserver); + + List actualResponses = responseObserver.future().get(); + Assert.assertEquals(1, actualResponses.size()); + Assert.assertEquals(expectedResponse, actualResponses.get(0)); + } + + @Test + @SuppressWarnings("all") + public void readRowsExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT); + mockBigQueryRead.addException(exception); + ReadRowsRequest request = ReadRowsRequest.newBuilder().build(); + + MockStreamObserver responseObserver = new MockStreamObserver<>(); + + ServerStreamingCallable callable = client.readRowsCallable(); + callable.serverStreamingCall(request, responseObserver); + + try { + List actualResponses = responseObserver.future().get(); + Assert.fail("No exception thrown"); + } catch (ExecutionException e) { + Assert.assertTrue(e.getCause() instanceof InvalidArgumentException); + InvalidArgumentException apiException = (InvalidArgumentException) e.getCause(); + Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); + } + } +} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryRead.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryRead.java new file mode 100644 index 0000000000..d1a8d888fa --- /dev/null +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryRead.java @@ -0,0 +1,57 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigquery.storage.v1beta2; + +import com.google.api.core.BetaApi; +import com.google.api.gax.grpc.testing.MockGrpcService; +import com.google.protobuf.AbstractMessage; +import io.grpc.ServerServiceDefinition; +import java.util.List; + +@javax.annotation.Generated("by GAPIC") +@BetaApi +public class MockBigQueryRead implements MockGrpcService { + private final MockBigQueryReadImpl serviceImpl; + + public MockBigQueryRead() { + serviceImpl = new MockBigQueryReadImpl(); + } + + @Override + public List getRequests() { + return serviceImpl.getRequests(); + } + + @Override + public void addResponse(AbstractMessage response) { + serviceImpl.addResponse(response); + } + + @Override + public void addException(Exception exception) { + serviceImpl.addException(exception); + } + + @Override + public ServerServiceDefinition getServiceDefinition() { + return serviceImpl.bindService(); + } + + @Override + public void reset() { + serviceImpl.reset(); + } +} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryReadImpl.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryReadImpl.java new file mode 100644 index 0000000000..abade32d73 --- /dev/null +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryReadImpl.java @@ -0,0 +1,102 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigquery.storage.v1beta2; + +import com.google.api.core.BetaApi; +import com.google.cloud.bigquery.storage.v1beta2.BigQueryReadGrpc.BigQueryReadImplBase; +import com.google.protobuf.AbstractMessage; +import io.grpc.stub.StreamObserver; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Queue; + +@javax.annotation.Generated("by GAPIC") +@BetaApi +public class MockBigQueryReadImpl extends BigQueryReadImplBase { + private List requests; + private Queue responses; + + public MockBigQueryReadImpl() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + public List getRequests() { + return requests; + } + + public void addResponse(AbstractMessage response) { + responses.add(response); + } + + public void setResponses(List responses) { + this.responses = new LinkedList(responses); + } + + public void addException(Exception exception) { + responses.add(exception); + } + + public void reset() { + requests = new ArrayList<>(); + responses = new LinkedList<>(); + } + + @Override + public void createReadSession( + CreateReadSessionRequest request, StreamObserver responseObserver) { + Object response = responses.remove(); + if (response instanceof ReadSession) { + requests.add(request); + responseObserver.onNext((ReadSession) response); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError((Exception) response); + } else { + responseObserver.onError(new IllegalArgumentException("Unrecognized response type")); + } + } + + @Override + public void readRows(ReadRowsRequest request, StreamObserver responseObserver) { + Object response = responses.remove(); + if (response instanceof ReadRowsResponse) { + requests.add(request); + responseObserver.onNext((ReadRowsResponse) response); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError((Exception) response); + } else { + responseObserver.onError(new IllegalArgumentException("Unrecognized response type")); + } + } + + @Override + public void splitReadStream( + SplitReadStreamRequest request, StreamObserver responseObserver) { + Object response = responses.remove(); + if (response instanceof SplitReadStreamResponse) { + requests.add(request); + responseObserver.onNext((SplitReadStreamResponse) response); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError((Exception) response); + } else { + responseObserver.onError(new IllegalArgumentException("Unrecognized response type")); + } + } +} diff --git a/grpc-google-cloud-bigquerystorage-v1alpha2/pom.xml b/grpc-google-cloud-bigquerystorage-v1alpha2/pom.xml new file mode 100644 index 0000000000..0118cee6c8 --- /dev/null +++ b/grpc-google-cloud-bigquerystorage-v1alpha2/pom.xml @@ -0,0 +1,56 @@ + + 4.0.0 + com.google.api.grpc + grpc-google-cloud-bigquerystorage-v1alpha2 + 0.85.2-SNAPSHOT + grpc-google-cloud-bigquerystorage-v1alpha2 + GRPC library for grpc-google-cloud-bigquerystorage-v1alpha2 + + com.google.cloud + google-cloud-bigquerystorage-parent + 0.120.2-beta-SNAPSHOT + + + + io.grpc + grpc-api + + + io.grpc + grpc-stub + + + io.grpc + grpc-protobuf + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1alpha2 + + + com.google.guava + guava + + + + + + java9 + + [9,) + + + + javax.annotation + javax.annotation-api + + + + + \ No newline at end of file diff --git 
a/grpc-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteGrpc.java b/grpc-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteGrpc.java new file mode 100644 index 0000000000..f0b96c7d6c --- /dev/null +++ b/grpc-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteGrpc.java @@ -0,0 +1,900 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigquery.storage.v1alpha2; + +import static io.grpc.MethodDescriptor.generateFullMethodName; +import static io.grpc.stub.ClientCalls.asyncBidiStreamingCall; +import static io.grpc.stub.ClientCalls.asyncUnaryCall; +import static io.grpc.stub.ClientCalls.blockingUnaryCall; +import static io.grpc.stub.ClientCalls.futureUnaryCall; +import static io.grpc.stub.ServerCalls.asyncBidiStreamingCall; +import static io.grpc.stub.ServerCalls.asyncUnaryCall; +import static io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall; +import static io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall; + +/** + * + * + *
+ * BigQuery Write API.
+ * The Write API can be used to write data to BigQuery.
+ * 
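+ * An illustrative sketch of obtaining the stubs defined below; the channel
+ * construction (and the omitted credentials/TLS setup) is an assumption, not
+ * part of this generated class:
+ *
+ *   ManagedChannel channel =
+ *       ManagedChannelBuilder.forTarget("bigquerystorage.googleapis.com:443").build();
+ *   BigQueryWriteGrpc.BigQueryWriteBlockingStub blockingStub =
+ *       BigQueryWriteGrpc.newBlockingStub(channel);
+ *   BigQueryWriteGrpc.BigQueryWriteStub asyncStub = BigQueryWriteGrpc.newStub(channel);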
+ */ +@javax.annotation.Generated( + value = "by gRPC proto compiler", + comments = "Source: google/cloud/bigquery/storage/v1alpha2/storage.proto") +public final class BigQueryWriteGrpc { + + private BigQueryWriteGrpc() {} + + public static final String SERVICE_NAME = "google.cloud.bigquery.storage.v1alpha2.BigQueryWrite"; + + // Static method descriptors that strictly reflect the proto. + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest, + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream> + getCreateWriteStreamMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "CreateWriteStream", + requestType = + com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest.class, + responseType = com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest, + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream> + getCreateWriteStreamMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest, + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream> + getCreateWriteStreamMethod; + if ((getCreateWriteStreamMethod = BigQueryWriteGrpc.getCreateWriteStreamMethod) == null) { + synchronized (BigQueryWriteGrpc.class) { + if ((getCreateWriteStreamMethod = BigQueryWriteGrpc.getCreateWriteStreamMethod) == null) { + BigQueryWriteGrpc.getCreateWriteStreamMethod = + getCreateWriteStreamMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "CreateWriteStream")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1alpha2.Storage + .CreateWriteStreamRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream + .getDefaultInstance())) + .setSchemaDescriptor( + new BigQueryWriteMethodDescriptorSupplier("CreateWriteStream")) + .build(); + } + } + } + return getCreateWriteStreamMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest, + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse> + getAppendRowsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "AppendRows", + requestType = com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.class, + responseType = com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest, + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse> + getAppendRowsMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest, + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse> + getAppendRowsMethod; + if ((getAppendRowsMethod = BigQueryWriteGrpc.getAppendRowsMethod) == null) { + synchronized (BigQueryWriteGrpc.class) { + if ((getAppendRowsMethod = 
BigQueryWriteGrpc.getAppendRowsMethod) == null) { + BigQueryWriteGrpc.getAppendRowsMethod = + getAppendRowsMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "AppendRows")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse + .getDefaultInstance())) + .setSchemaDescriptor(new BigQueryWriteMethodDescriptorSupplier("AppendRows")) + .build(); + } + } + } + return getAppendRowsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest, + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream> + getGetWriteStreamMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "GetWriteStream", + requestType = com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest.class, + responseType = com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest, + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream> + getGetWriteStreamMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest, + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream> + getGetWriteStreamMethod; + if ((getGetWriteStreamMethod = BigQueryWriteGrpc.getGetWriteStreamMethod) == null) { + synchronized (BigQueryWriteGrpc.class) { + if ((getGetWriteStreamMethod = BigQueryWriteGrpc.getGetWriteStreamMethod) == null) { + BigQueryWriteGrpc.getGetWriteStreamMethod = + getGetWriteStreamMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetWriteStream")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1alpha2.Storage + .GetWriteStreamRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream + .getDefaultInstance())) + .setSchemaDescriptor( + new BigQueryWriteMethodDescriptorSupplier("GetWriteStream")) + .build(); + } + } + } + return getGetWriteStreamMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest, + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse> + getFinalizeWriteStreamMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "FinalizeWriteStream", + requestType = + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest.class, + responseType = + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest, + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse> + getFinalizeWriteStreamMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest, + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse> + getFinalizeWriteStreamMethod; + if ((getFinalizeWriteStreamMethod = BigQueryWriteGrpc.getFinalizeWriteStreamMethod) == null) { + synchronized (BigQueryWriteGrpc.class) { + if ((getFinalizeWriteStreamMethod = BigQueryWriteGrpc.getFinalizeWriteStreamMethod) + == null) { + BigQueryWriteGrpc.getFinalizeWriteStreamMethod = + getFinalizeWriteStreamMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "FinalizeWriteStream")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1alpha2.Storage + .FinalizeWriteStreamRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1alpha2.Storage + .FinalizeWriteStreamResponse.getDefaultInstance())) + .setSchemaDescriptor( + new BigQueryWriteMethodDescriptorSupplier("FinalizeWriteStream")) + .build(); + } + } + } + return getFinalizeWriteStreamMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest, + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse> + getBatchCommitWriteStreamsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "BatchCommitWriteStreams", + requestType = + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest.class, + responseType = + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest, + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse> + getBatchCommitWriteStreamsMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest, + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse> + getBatchCommitWriteStreamsMethod; + if ((getBatchCommitWriteStreamsMethod = BigQueryWriteGrpc.getBatchCommitWriteStreamsMethod) + == null) { + synchronized (BigQueryWriteGrpc.class) { + if ((getBatchCommitWriteStreamsMethod = BigQueryWriteGrpc.getBatchCommitWriteStreamsMethod) + == null) { + BigQueryWriteGrpc.getBatchCommitWriteStreamsMethod = + getBatchCommitWriteStreamsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "BatchCommitWriteStreams")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1alpha2.Storage + .BatchCommitWriteStreamsRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1alpha2.Storage + .BatchCommitWriteStreamsResponse.getDefaultInstance())) + .setSchemaDescriptor( + new BigQueryWriteMethodDescriptorSupplier("BatchCommitWriteStreams")) + .build(); + } + } + } + return getBatchCommitWriteStreamsMethod; + } + + /** Creates a new async stub that supports all call types for the service */ + public static BigQueryWriteStub newStub(io.grpc.Channel channel) { + return new BigQueryWriteStub(channel); + } + + /** + * Creates a new blocking-style stub that supports unary and streaming output calls on the service + */ + public static BigQueryWriteBlockingStub newBlockingStub(io.grpc.Channel channel) { + return new BigQueryWriteBlockingStub(channel); + } + + /** Creates a new ListenableFuture-style stub that supports unary calls on the service */ + public static BigQueryWriteFutureStub newFutureStub(io.grpc.Channel channel) { + return new BigQueryWriteFutureStub(channel); + } + + /** + * + * + *
+   * BigQuery Write API.
+   * The Write API can be used to write data to BigQuery.
+   * 
+ */ + public abstract static class BigQueryWriteImplBase implements io.grpc.BindableService { + + /** + * + * + *
+     * Creates a write stream to the given table.
+     * 
+ */ + public void createWriteStream( + com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + asyncUnimplementedUnaryCall(getCreateWriteStreamMethod(), responseObserver); + } + + /** + * + * + *
+     * Appends data to the given stream.
+     * If `offset` is specified, the `offset` is checked against the end of the
+     * stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
+     * attempt is made to append to an offset beyond the current end of the stream
+     * or `ALREADY_EXISTS` if the user provides an `offset` that has already been
+     * written to. The user can retry with an adjusted offset within the same RPC
+     * stream. If `offset` is not specified, append happens at the end of the
+     * stream.
+     * The response contains the offset at which the append happened. Responses
+     * are received in the same order in which requests are sent. There will be
+     * one response for each successful request. If the `offset` is not set in the
+     * response, the append did not happen due to an error. If one request
+     * fails, all subsequent requests will also fail until a successful request
+     * is made again.
+     * If the stream is of `PENDING` type, data will only be available for read
+     * operations after the stream is committed.
+     * 
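+     * A hedged usage sketch mirroring the client test added in this change; the
+     * GAPIC wrapper (BigQueryWriteClient), the response observer, and the empty
+     * request are placeholders, not part of this generated class:
+     *
+     *   BidiStreamingCallable<Storage.AppendRowsRequest, Storage.AppendRowsResponse>
+     *       callable = client.appendRowsCallable();
+     *   ApiStreamObserver<Storage.AppendRowsRequest> requestObserver =
+     *       callable.bidiStreamingCall(responseObserver);
+     *   requestObserver.onNext(Storage.AppendRowsRequest.newBuilder().build());
+     *   requestObserver.onCompleted();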
+ */ + public io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest> + appendRows( + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse> + responseObserver) { + return asyncUnimplementedStreamingCall(getAppendRowsMethod(), responseObserver); + } + + /** + * + * + *
+     * Gets a write stream.
+     * 
+ */ + public void getWriteStream( + com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + asyncUnimplementedUnaryCall(getGetWriteStreamMethod(), responseObserver); + } + + /** + * + * + *
+     * Finalize a write stream so that no new data can be appended to the
+     * stream.
+     * 
+ */ + public void finalizeWriteStream( + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse> + responseObserver) { + asyncUnimplementedUnaryCall(getFinalizeWriteStreamMethod(), responseObserver); + } + + /** + * + * + *
+     * Atomically commits a group of `PENDING` streams that belong to the same
+     * `parent` table.
+     * Streams must be finalized before commit and cannot be committed multiple
+     * times. Once a stream is committed, data in the stream becomes available
+     * for read operations.
+     * 
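+     *
+     * Illustrative sketch only, not part of this change: a finalize-then-commit
+     * sequence with the blocking stub might look roughly like this. The
+     * `setName`, `setParent`, and `addWriteStreams` builder calls are assumptions
+     * about the request fields, not taken from this diff.
+     *
+     *   BigQueryWriteGrpc.BigQueryWriteBlockingStub blockingStub =
+     *       BigQueryWriteGrpc.newBlockingStub(channel);
+     *   blockingStub.finalizeWriteStream(
+     *       Storage.FinalizeWriteStreamRequest.newBuilder()
+     *           .setName(streamName)  // assumed field name
+     *           .build());
+     *   Storage.BatchCommitWriteStreamsResponse commitResponse =
+     *       blockingStub.batchCommitWriteStreams(
+     *           Storage.BatchCommitWriteStreamsRequest.newBuilder()
+     *               .setParent(tableName)        // assumed field name
+     *               .addWriteStreams(streamName) // assumed field name
+     *               .build());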
+ */ + public void batchCommitWriteStreams( + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse> + responseObserver) { + asyncUnimplementedUnaryCall(getBatchCommitWriteStreamsMethod(), responseObserver); + } + + @java.lang.Override + public final io.grpc.ServerServiceDefinition bindService() { + return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) + .addMethod( + getCreateWriteStreamMethod(), + asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest, + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream>( + this, METHODID_CREATE_WRITE_STREAM))) + .addMethod( + getAppendRowsMethod(), + asyncBidiStreamingCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest, + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse>( + this, METHODID_APPEND_ROWS))) + .addMethod( + getGetWriteStreamMethod(), + asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest, + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream>( + this, METHODID_GET_WRITE_STREAM))) + .addMethod( + getFinalizeWriteStreamMethod(), + asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest, + com.google.cloud.bigquery.storage.v1alpha2.Storage + .FinalizeWriteStreamResponse>(this, METHODID_FINALIZE_WRITE_STREAM))) + .addMethod( + getBatchCommitWriteStreamsMethod(), + asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1alpha2.Storage + .BatchCommitWriteStreamsRequest, + com.google.cloud.bigquery.storage.v1alpha2.Storage + .BatchCommitWriteStreamsResponse>( + this, METHODID_BATCH_COMMIT_WRITE_STREAMS))) + .build(); + } + } + + /** + * + * + *
+   * BigQuery Write API.
+   * The Write API can be used to write data to BigQuery.
+   * 
+ */ + public static final class BigQueryWriteStub extends io.grpc.stub.AbstractStub { + private BigQueryWriteStub(io.grpc.Channel channel) { + super(channel); + } + + private BigQueryWriteStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BigQueryWriteStub build(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryWriteStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a write stream to the given table.
+     * 
+ */ + public void createWriteStream( + com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + asyncUnaryCall( + getChannel().newCall(getCreateWriteStreamMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Appends data to the given stream.
+     * If `offset` is specified, the `offset` is checked against the end of the
+     * stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
+     * attempt is made to append to an offset beyond the current end of the stream,
+     * or `ALREADY_EXISTS` if the user provides an `offset` that has already been
+     * written to. The user can retry with an adjusted offset within the same RPC
+     * stream. If `offset` is not specified, the append happens at the end of the
+     * stream.
+     * The response contains the offset at which the append happened. Responses
+     * are received in the same order in which requests are sent. There will be
+     * one response for each successful request. If the `offset` is not set in the
+     * response, the append did not happen due to an error. If one request
+     * fails, all subsequent requests will also fail until a successful request
+     * is made again.
+     * If the stream is of `PENDING` type, data will only be available for read
+     * operations after the stream is committed.
+     * 
+ */ + public io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest> + appendRows( + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse> + responseObserver) { + return asyncBidiStreamingCall( + getChannel().newCall(getAppendRowsMethod(), getCallOptions()), responseObserver); + } + + /** + * + * + *
+     * Gets a write stream.
+     * 
+ */ + public void getWriteStream( + com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + asyncUnaryCall( + getChannel().newCall(getGetWriteStreamMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Finalize a write stream so that no new data can be appended to the
+     * stream.
+     * 
+ */ + public void finalizeWriteStream( + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse> + responseObserver) { + asyncUnaryCall( + getChannel().newCall(getFinalizeWriteStreamMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Atomically commits a group of `PENDING` streams that belong to the same
+     * `parent` table.
+     * Streams must be finalized before commit and cannot be committed multiple
+     * times. Once a stream is committed, data in the stream becomes available
+     * for read operations.
+     * 
+ */ + public void batchCommitWriteStreams( + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse> + responseObserver) { + asyncUnaryCall( + getChannel().newCall(getBatchCommitWriteStreamsMethod(), getCallOptions()), + request, + responseObserver); + } + } + + /** + * + * + *
+   * BigQuery Write API.
+   * The Write API can be used to write data to BigQuery.
+   * 
+ */ + public static final class BigQueryWriteBlockingStub + extends io.grpc.stub.AbstractStub { + private BigQueryWriteBlockingStub(io.grpc.Channel channel) { + super(channel); + } + + private BigQueryWriteBlockingStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BigQueryWriteBlockingStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryWriteBlockingStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a write stream to the given table.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream createWriteStream( + com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest request) { + return blockingUnaryCall( + getChannel(), getCreateWriteStreamMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Gets a write stream.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream getWriteStream( + com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest request) { + return blockingUnaryCall(getChannel(), getGetWriteStreamMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Finalize a write stream so that no new data can be appended to the
+     * stream.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse + finalizeWriteStream( + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest request) { + return blockingUnaryCall( + getChannel(), getFinalizeWriteStreamMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Atomically commits a group of `PENDING` streams that belong to the same
+     * `parent` table.
+     * Streams must be finalized before commit and cannot be committed multiple
+     * times. Once a stream is committed, data in the stream becomes available
+     * for read operations.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse + batchCommitWriteStreams( + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest + request) { + return blockingUnaryCall( + getChannel(), getBatchCommitWriteStreamsMethod(), getCallOptions(), request); + } + } + + /** + * + * + *
+   * BigQuery Write API.
+   * The Write API can be used to write data to BigQuery.
+   * 
+ */ + public static final class BigQueryWriteFutureStub + extends io.grpc.stub.AbstractStub { + private BigQueryWriteFutureStub(io.grpc.Channel channel) { + super(channel); + } + + private BigQueryWriteFutureStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BigQueryWriteFutureStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryWriteFutureStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a write stream to the given table.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream> + createWriteStream( + com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest request) { + return futureUnaryCall( + getChannel().newCall(getCreateWriteStreamMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Gets a write stream.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream> + getWriteStream( + com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest request) { + return futureUnaryCall( + getChannel().newCall(getGetWriteStreamMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Finalize a write stream so that no new data can be appended to the
+     * stream.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse> + finalizeWriteStream( + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest request) { + return futureUnaryCall( + getChannel().newCall(getFinalizeWriteStreamMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Atomically commits a group of `PENDING` streams that belong to the same
+     * `parent` table.
+     * Streams must be finalized before commit and cannot be committed multiple
+     * times. Once a stream is committed, data in the stream becomes available
+     * for read operations.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse> + batchCommitWriteStreams( + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest + request) { + return futureUnaryCall( + getChannel().newCall(getBatchCommitWriteStreamsMethod(), getCallOptions()), request); + } + } + + private static final int METHODID_CREATE_WRITE_STREAM = 0; + private static final int METHODID_GET_WRITE_STREAM = 1; + private static final int METHODID_FINALIZE_WRITE_STREAM = 2; + private static final int METHODID_BATCH_COMMIT_WRITE_STREAMS = 3; + private static final int METHODID_APPEND_ROWS = 4; + + private static final class MethodHandlers + implements io.grpc.stub.ServerCalls.UnaryMethod, + io.grpc.stub.ServerCalls.ServerStreamingMethod, + io.grpc.stub.ServerCalls.ClientStreamingMethod, + io.grpc.stub.ServerCalls.BidiStreamingMethod { + private final BigQueryWriteImplBase serviceImpl; + private final int methodId; + + MethodHandlers(BigQueryWriteImplBase serviceImpl, int methodId) { + this.serviceImpl = serviceImpl; + this.methodId = methodId; + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public void invoke(Req request, io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_CREATE_WRITE_STREAM: + serviceImpl.createWriteStream( + (com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest) request, + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream>) + responseObserver); + break; + case METHODID_GET_WRITE_STREAM: + serviceImpl.getWriteStream( + (com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest) request, + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream>) + responseObserver); + break; + case METHODID_FINALIZE_WRITE_STREAM: + serviceImpl.finalizeWriteStream( + (com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest) + request, + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1alpha2.Storage + .FinalizeWriteStreamResponse>) + responseObserver); + break; + case METHODID_BATCH_COMMIT_WRITE_STREAMS: + serviceImpl.batchCommitWriteStreams( + (com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest) + request, + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1alpha2.Storage + .BatchCommitWriteStreamsResponse>) + responseObserver); + break; + default: + throw new AssertionError(); + } + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public io.grpc.stub.StreamObserver invoke( + io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_APPEND_ROWS: + return (io.grpc.stub.StreamObserver) + serviceImpl.appendRows( + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse>) + responseObserver); + default: + throw new AssertionError(); + } + } + } + + private abstract static class BigQueryWriteBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoFileDescriptorSupplier, + io.grpc.protobuf.ProtoServiceDescriptorSupplier { + BigQueryWriteBaseDescriptorSupplier() {} + + @java.lang.Override + public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage.getDescriptor(); + } + + @java.lang.Override + public com.google.protobuf.Descriptors.ServiceDescriptor 
getServiceDescriptor() { + return getFileDescriptor().findServiceByName("BigQueryWrite"); + } + } + + private static final class BigQueryWriteFileDescriptorSupplier + extends BigQueryWriteBaseDescriptorSupplier { + BigQueryWriteFileDescriptorSupplier() {} + } + + private static final class BigQueryWriteMethodDescriptorSupplier + extends BigQueryWriteBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoMethodDescriptorSupplier { + private final String methodName; + + BigQueryWriteMethodDescriptorSupplier(String methodName) { + this.methodName = methodName; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { + return getServiceDescriptor().findMethodByName(methodName); + } + } + + private static volatile io.grpc.ServiceDescriptor serviceDescriptor; + + public static io.grpc.ServiceDescriptor getServiceDescriptor() { + io.grpc.ServiceDescriptor result = serviceDescriptor; + if (result == null) { + synchronized (BigQueryWriteGrpc.class) { + result = serviceDescriptor; + if (result == null) { + serviceDescriptor = + result = + io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) + .setSchemaDescriptor(new BigQueryWriteFileDescriptorSupplier()) + .addMethod(getCreateWriteStreamMethod()) + .addMethod(getAppendRowsMethod()) + .addMethod(getGetWriteStreamMethod()) + .addMethod(getFinalizeWriteStreamMethod()) + .addMethod(getBatchCommitWriteStreamsMethod()) + .build(); + } + } + } + return result; + } +} diff --git a/grpc-google-cloud-bigquerystorage-v1beta2/pom.xml b/grpc-google-cloud-bigquerystorage-v1beta2/pom.xml new file mode 100644 index 0000000000..5e5c7a3ceb --- /dev/null +++ b/grpc-google-cloud-bigquerystorage-v1beta2/pom.xml @@ -0,0 +1,56 @@ + + 4.0.0 + com.google.api.grpc + grpc-google-cloud-bigquerystorage-v1beta2 + 0.85.2-SNAPSHOT + grpc-google-cloud-bigquerystorage-v1beta2 + GRPC library for grpc-google-cloud-bigquerystorage-v1beta2 + + com.google.cloud + google-cloud-bigquerystorage-parent + 0.120.2-beta-SNAPSHOT + + + + io.grpc + grpc-api + + + io.grpc + grpc-stub + + + io.grpc + grpc-protobuf + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1beta2 + + + com.google.guava + guava + + + + + + java9 + + [9,) + + + + javax.annotation + javax.annotation-api + + + + + \ No newline at end of file diff --git a/grpc-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadGrpc.java b/grpc-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadGrpc.java new file mode 100644 index 0000000000..40c35541fc --- /dev/null +++ b/grpc-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadGrpc.java @@ -0,0 +1,703 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigquery.storage.v1beta2; + +import static io.grpc.MethodDescriptor.generateFullMethodName; +import static io.grpc.stub.ClientCalls.asyncServerStreamingCall; +import static io.grpc.stub.ClientCalls.asyncUnaryCall; +import static io.grpc.stub.ClientCalls.blockingServerStreamingCall; +import static io.grpc.stub.ClientCalls.blockingUnaryCall; +import static io.grpc.stub.ClientCalls.futureUnaryCall; +import static io.grpc.stub.ServerCalls.asyncServerStreamingCall; +import static io.grpc.stub.ServerCalls.asyncUnaryCall; +import static io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall; + +/** + * + * + *
+ * BigQuery Read API.
+ * The Read API can be used to read data from BigQuery.
+ * 
+ */ +@javax.annotation.Generated( + value = "by gRPC proto compiler", + comments = "Source: google/cloud/bigquery/storage/v1beta2/storage.proto") +public final class BigQueryReadGrpc { + + private BigQueryReadGrpc() {} + + public static final String SERVICE_NAME = "google.cloud.bigquery.storage.v1beta2.BigQueryRead"; + + // Static method descriptors that strictly reflect the proto. + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest, + com.google.cloud.bigquery.storage.v1beta2.ReadSession> + getCreateReadSessionMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "CreateReadSession", + requestType = com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest.class, + responseType = com.google.cloud.bigquery.storage.v1beta2.ReadSession.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest, + com.google.cloud.bigquery.storage.v1beta2.ReadSession> + getCreateReadSessionMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest, + com.google.cloud.bigquery.storage.v1beta2.ReadSession> + getCreateReadSessionMethod; + if ((getCreateReadSessionMethod = BigQueryReadGrpc.getCreateReadSessionMethod) == null) { + synchronized (BigQueryReadGrpc.class) { + if ((getCreateReadSessionMethod = BigQueryReadGrpc.getCreateReadSessionMethod) == null) { + BigQueryReadGrpc.getCreateReadSessionMethod = + getCreateReadSessionMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "CreateReadSession")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta2.ReadSession + .getDefaultInstance())) + .setSchemaDescriptor( + new BigQueryReadMethodDescriptorSupplier("CreateReadSession")) + .build(); + } + } + } + return getCreateReadSessionMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest, + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse> + getReadRowsMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "ReadRows", + requestType = com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest.class, + responseType = com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest, + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse> + getReadRowsMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest, + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse> + getReadRowsMethod; + if ((getReadRowsMethod = BigQueryReadGrpc.getReadRowsMethod) == null) { + synchronized (BigQueryReadGrpc.class) { + if ((getReadRowsMethod = BigQueryReadGrpc.getReadRowsMethod) == null) { + BigQueryReadGrpc.getReadRowsMethod = + getReadRowsMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.SERVER_STREAMING) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "ReadRows")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse + .getDefaultInstance())) + .setSchemaDescriptor(new BigQueryReadMethodDescriptorSupplier("ReadRows")) + .build(); + } + } + } + return getReadRowsMethod; + } + + private static volatile io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest, + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse> + getSplitReadStreamMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "SplitReadStream", + requestType = com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest.class, + responseType = com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest, + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse> + getSplitReadStreamMethod() { + io.grpc.MethodDescriptor< + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest, + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse> + getSplitReadStreamMethod; + if ((getSplitReadStreamMethod = BigQueryReadGrpc.getSplitReadStreamMethod) == null) { + synchronized (BigQueryReadGrpc.class) { + if ((getSplitReadStreamMethod = BigQueryReadGrpc.getSplitReadStreamMethod) == null) { + BigQueryReadGrpc.getSplitReadStreamMethod = + getSplitReadStreamMethod = + io.grpc.MethodDescriptor + . + newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName(generateFullMethodName(SERVICE_NAME, "SplitReadStream")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest + .getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse + .getDefaultInstance())) + .setSchemaDescriptor( + new BigQueryReadMethodDescriptorSupplier("SplitReadStream")) + .build(); + } + } + } + return getSplitReadStreamMethod; + } + + /** Creates a new async stub that supports all call types for the service */ + public static BigQueryReadStub newStub(io.grpc.Channel channel) { + return new BigQueryReadStub(channel); + } + + /** + * Creates a new blocking-style stub that supports unary and streaming output calls on the service + */ + public static BigQueryReadBlockingStub newBlockingStub(io.grpc.Channel channel) { + return new BigQueryReadBlockingStub(channel); + } + + /** Creates a new ListenableFuture-style stub that supports unary calls on the service */ + public static BigQueryReadFutureStub newFutureStub(io.grpc.Channel channel) { + return new BigQueryReadFutureStub(channel); + } + + /** + * + * + *
+   * BigQuery Read API.
+   * The Read API can be used to read data from BigQuery.
+   * 
+ */ + public abstract static class BigQueryReadImplBase implements io.grpc.BindableService { + + /** + * + * + *
+     * Creates a new read session. A read session divides the contents of a
+     * BigQuery table into one or more streams, which can then be used to read
+     * data from the table. The read session also specifies properties of the
+     * data to be read, such as a list of columns or a push-down filter describing
+     * the rows to be returned.
+     * A particular row can be read by at most one stream. When the caller has
+     * reached the end of each stream in the session, then all the data in the
+     * table has been read.
+     * Data is assigned to each stream such that roughly the same number of
+     * rows can be read from each stream. Because the server-side unit for
+     * assigning data is collections of rows, the API does not guarantee that
+     * each stream will return the same number of rows. Additionally, the
+     * limits are enforced based on the number of pre-filtered rows, so some
+     * filters can lead to lopsided assignments.
+     * Read sessions automatically expire 24 hours after they are created and do
+     * not require manual clean-up by the caller.
+     * 
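+     *
+     * Illustrative sketch only, not part of this change: with the blocking stub
+     * defined later in this file, a session could be created roughly like this.
+     * The `setParent`, `setReadSession`, `setTable`, and `setMaxStreamCount`
+     * builder calls are assumptions about the request fields, not taken from
+     * this diff.
+     *
+     *   BigQueryReadGrpc.BigQueryReadBlockingStub blockingStub =
+     *       BigQueryReadGrpc.newBlockingStub(channel);
+     *   ReadSession session =
+     *       blockingStub.createReadSession(
+     *           CreateReadSessionRequest.newBuilder()
+     *               .setParent("projects/my-project")                     // assumed
+     *               .setReadSession(
+     *                   ReadSession.newBuilder()
+     *                       .setTable("projects/p/datasets/d/tables/t"))  // assumed
+     *               .setMaxStreamCount(1)                                 // assumed
+     *               .build());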
+ */ + public void createReadSession( + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + asyncUnimplementedUnaryCall(getCreateReadSessionMethod(), responseObserver); + } + + /** + * + * + *
+     * Reads rows from the stream in the format prescribed by the ReadSession.
+     * Each response contains one or more table rows, up to a maximum of 100 MiB
+     * per response; read requests which attempt to read individual rows larger
+     * than 100 MiB will fail.
+     * Each request also returns a set of stream statistics reflecting the current
+     * state of the stream.
+     * 
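+     *
+     * Illustrative sketch only, not part of this change: with the blocking stub
+     * defined later in this file, one stream of the session could be drained
+     * roughly like this. The `setReadStream` builder call is an assumption about
+     * the request fields, not taken from this diff.
+     *
+     *   java.util.Iterator<ReadRowsResponse> responses =
+     *       BigQueryReadGrpc.newBlockingStub(channel)
+     *           .readRows(
+     *               ReadRowsRequest.newBuilder()
+     *                   .setReadStream(readStreamName)  // assumed field name
+     *                   .build());
+     *   while (responses.hasNext()) {
+     *     ReadRowsResponse response = responses.next();
+     *     // Decode rows according to the data format chosen for the session.
+     *   }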
+ */ + public void readRows( + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + asyncUnimplementedUnaryCall(getReadRowsMethod(), responseObserver); + } + + /** + * + * + *
+     * Splits a given `ReadStream` into two `ReadStream` objects. These
+     * `ReadStream` objects are referred to as the primary and the residual
+     * streams of the split. The original `ReadStream` can still be read from in
+     * the same manner as before. Both of the returned `ReadStream` objects can
+     * also be read from, and the rows returned by both child streams will be
+     * the same as the rows read from the original stream.
+     * Moreover, the two child streams will be allocated back-to-back in the
+     * original `ReadStream`. Concretely, it is guaranteed that for streams
+     * original, primary, and residual, original[0-j] = primary[0-j] and
+     * original[j-n] = residual[0-m] once the streams have been read to
+     * completion.
+     * 
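+     *
+     * Illustrative sketch only, not part of this change: with the blocking stub
+     * defined later in this file, a stream could be split roughly in half like
+     * this. The `setName` and `setFraction` builder calls are assumptions about
+     * the request fields, not taken from this diff.
+     *
+     *   SplitReadStreamResponse split =
+     *       BigQueryReadGrpc.newBlockingStub(channel)
+     *           .splitReadStream(
+     *               SplitReadStreamRequest.newBuilder()
+     *                   .setName(readStreamName)  // assumed field name
+     *                   .setFraction(0.5)         // assumed field name
+     *                   .build());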
+ */ + public void splitReadStream( + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse> + responseObserver) { + asyncUnimplementedUnaryCall(getSplitReadStreamMethod(), responseObserver); + } + + @java.lang.Override + public final io.grpc.ServerServiceDefinition bindService() { + return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) + .addMethod( + getCreateReadSessionMethod(), + asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest, + com.google.cloud.bigquery.storage.v1beta2.ReadSession>( + this, METHODID_CREATE_READ_SESSION))) + .addMethod( + getReadRowsMethod(), + asyncServerStreamingCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest, + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse>( + this, METHODID_READ_ROWS))) + .addMethod( + getSplitReadStreamMethod(), + asyncUnaryCall( + new MethodHandlers< + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest, + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse>( + this, METHODID_SPLIT_READ_STREAM))) + .build(); + } + } + + /** + * + * + *
+   * BigQuery Read API.
+   * The Read API can be used to read data from BigQuery.
+   * 
+ */ + public static final class BigQueryReadStub extends io.grpc.stub.AbstractStub { + private BigQueryReadStub(io.grpc.Channel channel) { + super(channel); + } + + private BigQueryReadStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BigQueryReadStub build(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryReadStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a new read session. A read session divides the contents of a
+     * BigQuery table into one or more streams, which can then be used to read
+     * data from the table. The read session also specifies properties of the
+     * data to be read, such as a list of columns or a push-down filter describing
+     * the rows to be returned.
+     * A particular row can be read by at most one stream. When the caller has
+     * reached the end of each stream in the session, then all the data in the
+     * table has been read.
+     * Data is assigned to each stream such that roughly the same number of
+     * rows can be read from each stream. Because the server-side unit for
+     * assigning data is collections of rows, the API does not guarantee that
+     * each stream will return the same number of rows. Additionally, the
+     * limits are enforced based on the number of pre-filtered rows, so some
+     * filters can lead to lopsided assignments.
+     * Read sessions automatically expire 24 hours after they are created and do
+     * not require manual clean-up by the caller.
+     * 
+ */ + public void createReadSession( + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + asyncUnaryCall( + getChannel().newCall(getCreateReadSessionMethod(), getCallOptions()), + request, + responseObserver); + } + + /** + * + * + *
+     * Reads rows from the stream in the format prescribed by the ReadSession.
+     * Each response contains one or more table rows, up to a maximum of 100 MiB
+     * per response; read requests which attempt to read individual rows larger
+     * than 100 MiB will fail.
+     * Each request also returns a set of stream statistics reflecting the current
+     * state of the stream.
+     * 
+ */ + public void readRows( + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest request, + io.grpc.stub.StreamObserver + responseObserver) { + asyncServerStreamingCall( + getChannel().newCall(getReadRowsMethod(), getCallOptions()), request, responseObserver); + } + + /** + * + * + *
+     * Splits a given `ReadStream` into two `ReadStream` objects. These
+     * `ReadStream` objects are referred to as the primary and the residual
+     * streams of the split. The original `ReadStream` can still be read from in
+     * the same manner as before. Both of the returned `ReadStream` objects can
+     * also be read from, and the rows returned by both child streams will be
+     * the same as the rows read from the original stream.
+     * Moreover, the two child streams will be allocated back-to-back in the
+     * original `ReadStream`. Concretely, it is guaranteed that for streams
+     * original, primary, and residual, original[0-j] = primary[0-j] and
+     * original[j-n] = residual[0-m] once the streams have been read to
+     * completion.
+     * 
+ */ + public void splitReadStream( + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest request, + io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse> + responseObserver) { + asyncUnaryCall( + getChannel().newCall(getSplitReadStreamMethod(), getCallOptions()), + request, + responseObserver); + } + } + + /** + * + * + *
+   * BigQuery Read API.
+   * The Read API can be used to read data from BigQuery.
+   * 
+ */ + public static final class BigQueryReadBlockingStub + extends io.grpc.stub.AbstractStub { + private BigQueryReadBlockingStub(io.grpc.Channel channel) { + super(channel); + } + + private BigQueryReadBlockingStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BigQueryReadBlockingStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryReadBlockingStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a new read session. A read session divides the contents of a
+     * BigQuery table into one or more streams, which can then be used to read
+     * data from the table. The read session also specifies properties of the
+     * data to be read, such as a list of columns or a push-down filter describing
+     * the rows to be returned.
+     * A particular row can be read by at most one stream. When the caller has
+     * reached the end of each stream in the session, then all the data in the
+     * table has been read.
+     * Data is assigned to each stream such that roughly the same number of
+     * rows can be read from each stream. Because the server-side unit for
+     * assigning data is collections of rows, the API does not guarantee that
+     * each stream will return the same number of rows. Additionally, the
+     * limits are enforced based on the number of pre-filtered rows, so some
+     * filters can lead to lopsided assignments.
+     * Read sessions automatically expire 24 hours after they are created and do
+     * not require manual clean-up by the caller.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1beta2.ReadSession createReadSession( + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest request) { + return blockingUnaryCall( + getChannel(), getCreateReadSessionMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Reads rows from the stream in the format prescribed by the ReadSession.
+     * Each response contains one or more table rows, up to a maximum of 100 MiB
+     * per response; read requests which attempt to read individual rows larger
+     * than 100 MiB will fail.
+     * Each request also returns a set of stream statistics reflecting the current
+     * state of the stream.
+     * 
+ */ + public java.util.Iterator readRows( + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest request) { + return blockingServerStreamingCall( + getChannel(), getReadRowsMethod(), getCallOptions(), request); + } + + /** + * + * + *
+     * Splits a given `ReadStream` into two `ReadStream` objects. These
+     * `ReadStream` objects are referred to as the primary and the residual
+     * streams of the split. The original `ReadStream` can still be read from in
+     * the same manner as before. Both of the returned `ReadStream` objects can
+     * also be read from, and the rows returned by both child streams will be
+     * the same as the rows read from the original stream.
+     * Moreover, the two child streams will be allocated back-to-back in the
+     * original `ReadStream`. Concretely, it is guaranteed that for streams
+     * original, primary, and residual, original[0-j] = primary[0-j] and
+     * original[j-n] = residual[0-m] once the streams have been read to
+     * completion.
+     * 
+ */ + public com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse splitReadStream( + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest request) { + return blockingUnaryCall(getChannel(), getSplitReadStreamMethod(), getCallOptions(), request); + } + } + + /** + * + * + *
+   * BigQuery Read API.
+   * The Read API can be used to read data from BigQuery.
+   * 
+ */ + public static final class BigQueryReadFutureStub + extends io.grpc.stub.AbstractStub { + private BigQueryReadFutureStub(io.grpc.Channel channel) { + super(channel); + } + + private BigQueryReadFutureStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + super(channel, callOptions); + } + + @java.lang.Override + protected BigQueryReadFutureStub build( + io.grpc.Channel channel, io.grpc.CallOptions callOptions) { + return new BigQueryReadFutureStub(channel, callOptions); + } + + /** + * + * + *
+     * Creates a new read session. A read session divides the contents of a
+     * BigQuery table into one or more streams, which can then be used to read
+     * data from the table. The read session also specifies properties of the
+     * data to be read, such as a list of columns or a push-down filter describing
+     * the rows to be returned.
+     * A particular row can be read by at most one stream. When the caller has
+     * reached the end of each stream in the session, then all the data in the
+     * table has been read.
+     * Data is assigned to each stream such that roughly the same number of
+     * rows can be read from each stream. Because the server-side unit for
+     * assigning data is collections of rows, the API does not guarantee that
+     * each stream will return the same number of rows. Additionally, the
+     * limits are enforced based on the number of pre-filtered rows, so some
+     * filters can lead to lopsided assignments.
+     * Read sessions automatically expire 24 hours after they are created and do
+     * not require manual clean-up by the caller.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1beta2.ReadSession> + createReadSession( + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest request) { + return futureUnaryCall( + getChannel().newCall(getCreateReadSessionMethod(), getCallOptions()), request); + } + + /** + * + * + *
+     * Splits a given `ReadStream` into two `ReadStream` objects. These
+     * `ReadStream` objects are referred to as the primary and the residual
+     * streams of the split. The original `ReadStream` can still be read from in
+     * the same manner as before. Both of the returned `ReadStream` objects can
+     * also be read from, and the rows returned by both child streams will be
+     * the same as the rows read from the original stream.
+     * Moreover, the two child streams will be allocated back-to-back in the
+     * original `ReadStream`. Concretely, it is guaranteed that for streams
+     * original, primary, and residual, original[0-j] = primary[0-j] and
+     * original[j-n] = residual[0-m] once the streams have been read to
+     * completion.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse> + splitReadStream(com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest request) { + return futureUnaryCall( + getChannel().newCall(getSplitReadStreamMethod(), getCallOptions()), request); + } + } + + private static final int METHODID_CREATE_READ_SESSION = 0; + private static final int METHODID_READ_ROWS = 1; + private static final int METHODID_SPLIT_READ_STREAM = 2; + + private static final class MethodHandlers + implements io.grpc.stub.ServerCalls.UnaryMethod, + io.grpc.stub.ServerCalls.ServerStreamingMethod, + io.grpc.stub.ServerCalls.ClientStreamingMethod, + io.grpc.stub.ServerCalls.BidiStreamingMethod { + private final BigQueryReadImplBase serviceImpl; + private final int methodId; + + MethodHandlers(BigQueryReadImplBase serviceImpl, int methodId) { + this.serviceImpl = serviceImpl; + this.methodId = methodId; + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public void invoke(Req request, io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + case METHODID_CREATE_READ_SESSION: + serviceImpl.createReadSession( + (com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest) request, + (io.grpc.stub.StreamObserver) + responseObserver); + break; + case METHODID_READ_ROWS: + serviceImpl.readRows( + (com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest) request, + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse>) + responseObserver); + break; + case METHODID_SPLIT_READ_STREAM: + serviceImpl.splitReadStream( + (com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest) request, + (io.grpc.stub.StreamObserver< + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse>) + responseObserver); + break; + default: + throw new AssertionError(); + } + } + + @java.lang.Override + @java.lang.SuppressWarnings("unchecked") + public io.grpc.stub.StreamObserver invoke( + io.grpc.stub.StreamObserver responseObserver) { + switch (methodId) { + default: + throw new AssertionError(); + } + } + } + + private abstract static class BigQueryReadBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoFileDescriptorSupplier, + io.grpc.protobuf.ProtoServiceDescriptorSupplier { + BigQueryReadBaseDescriptorSupplier() {} + + @java.lang.Override + public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto.getDescriptor(); + } + + @java.lang.Override + public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() { + return getFileDescriptor().findServiceByName("BigQueryRead"); + } + } + + private static final class BigQueryReadFileDescriptorSupplier + extends BigQueryReadBaseDescriptorSupplier { + BigQueryReadFileDescriptorSupplier() {} + } + + private static final class BigQueryReadMethodDescriptorSupplier + extends BigQueryReadBaseDescriptorSupplier + implements io.grpc.protobuf.ProtoMethodDescriptorSupplier { + private final String methodName; + + BigQueryReadMethodDescriptorSupplier(String methodName) { + this.methodName = methodName; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { + return getServiceDescriptor().findMethodByName(methodName); + } + } + + private static volatile io.grpc.ServiceDescriptor serviceDescriptor; + + public static io.grpc.ServiceDescriptor getServiceDescriptor() { + 
io.grpc.ServiceDescriptor result = serviceDescriptor; + if (result == null) { + synchronized (BigQueryReadGrpc.class) { + result = serviceDescriptor; + if (result == null) { + serviceDescriptor = + result = + io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) + .setSchemaDescriptor(new BigQueryReadFileDescriptorSupplier()) + .addMethod(getCreateReadSessionMethod()) + .addMethod(getReadRowsMethod()) + .addMethod(getSplitReadStreamMethod()) + .build(); + } + } + } + return result; + } +} diff --git a/pom.xml b/pom.xml index 8287116890..68438d805d 100644 --- a/pom.xml +++ b/pom.xml @@ -78,16 +78,36 @@ + + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1alpha2 + 0.85.2-SNAPSHOT + com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta1 0.85.2-SNAPSHOT + + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1beta2 + 0.85.2-SNAPSHOT + + + com.google.api.grpc + grpc-google-cloud-bigquerystorage-v1alpha2 + 0.85.2-SNAPSHOT + com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta1 0.85.2-SNAPSHOT + + com.google.api.grpc + grpc-google-cloud-bigquerystorage-v1beta2 + 0.85.2-SNAPSHOT + com.google.cloud google-cloud-bigquerystorage @@ -178,8 +198,12 @@ + proto-google-cloud-bigquerystorage-v1alpha2 proto-google-cloud-bigquerystorage-v1beta1 + proto-google-cloud-bigquerystorage-v1beta2 + grpc-google-cloud-bigquerystorage-v1alpha2 grpc-google-cloud-bigquerystorage-v1beta1 + grpc-google-cloud-bigquerystorage-v1beta2 google-cloud-bigquerystorage google-cloud-bigquerystorage-bom diff --git a/proto-google-cloud-bigquerystorage-v1alpha2/pom.xml b/proto-google-cloud-bigquerystorage-v1alpha2/pom.xml new file mode 100644 index 0000000000..57d6877330 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1alpha2/pom.xml @@ -0,0 +1,25 @@ + + 4.0.0 + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1alpha2 + 0.85.2-SNAPSHOT + proto-google-cloud-bigquerystorage-v1alpha2 + PROTO library for proto-google-cloud-bigquerystorage-v1alpha2 + + com.google.cloud + google-cloud-bigquerystorage-parent + 0.120.2-beta-SNAPSHOT + + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + proto-google-common-protos + + + \ No newline at end of file diff --git a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/ProtoBufProto.java b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/ProtoBufProto.java new file mode 100644 index 0000000000..c41adbb233 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/ProtoBufProto.java @@ -0,0 +1,1702 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/cloud/bigquery/storage/v1alpha2/protobuf.proto + +package com.google.cloud.bigquery.storage.v1alpha2; + +public final class ProtoBufProto { + private ProtoBufProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + public interface ProtoSchemaOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.ProtoSchema) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Message descriptor for the data. The descriptor has to be self-contained,
+     * including all the nested type definitions, except for protocol buffer
+     * well-known types
+     * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
+     * and zetasql public protos
+     * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
+     * 
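+     *
+     * Illustrative sketch only, not part of this change: for a generated
+     * protobuf class `RowMessage` with no nested or imported message
+     * dependencies, a self-contained descriptor could be attached roughly like
+     * this (`RowMessage` is a hypothetical placeholder).
+     *
+     *   ProtoBufProto.ProtoSchema schema =
+     *       ProtoBufProto.ProtoSchema.newBuilder()
+     *           .setProtoDescriptor(RowMessage.getDescriptor().toProto())
+     *           .build();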
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + * + * @return Whether the protoDescriptor field is set. + */ + boolean hasProtoDescriptor(); + /** + * + * + *
+     * Message descriptor for the data. The descriptor has to be self-contained,
+     * including all the nested type definitions, except for protocol buffer
+     * well-known types
+     * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
+     * and zetasql public protos
+     * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + * + * @return The protoDescriptor. + */ + com.google.protobuf.DescriptorProtos.DescriptorProto getProtoDescriptor(); + /** + * + * + *
+     * Message descriptor for the data. The descriptor has to be self-contained,
+     * including all the nested type definitions, except for protocol buffer
+     * well-known types
+     * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
+     * and zetasql public protos
+     * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder getProtoDescriptorOrBuilder(); + } + /** + * + * + *
+   * Protobuf schema is an API representation of the protocol buffer schema.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.ProtoSchema} + */ + public static final class ProtoSchema extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.ProtoSchema) + ProtoSchemaOrBuilder { + private static final long serialVersionUID = 0L; + // Use ProtoSchema.newBuilder() to construct. + private ProtoSchema(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ProtoSchema() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ProtoSchema(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private ProtoSchema( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.protobuf.DescriptorProtos.DescriptorProto.Builder subBuilder = null; + if (protoDescriptor_ != null) { + subBuilder = protoDescriptor_.toBuilder(); + } + protoDescriptor_ = + input.readMessage( + com.google.protobuf.DescriptorProtos.DescriptorProto.PARSER, + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(protoDescriptor_); + protoDescriptor_ = subBuilder.buildPartial(); + } + + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema.class, + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema.Builder.class); + } + + public static final int PROTO_DESCRIPTOR_FIELD_NUMBER = 1; + private com.google.protobuf.DescriptorProtos.DescriptorProto protoDescriptor_; + /** + * + * + *
+     * Message descriptor for the data. The descriptor has to be self-contained,
+     * including all the nested type definitions, except for protocol buffer
+     * well-known types
+     * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
+     * and zetasql public protos
+     * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + * + * @return Whether the protoDescriptor field is set. + */ + public boolean hasProtoDescriptor() { + return protoDescriptor_ != null; + } + /** + * + * + *
+     * Message descriptor for the data. The descriptor has to be self-contained,
+     * including all the nested type definitions, except for protocol buffer
+     * well-known types
+     * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
+     * and zetasql public protos
+     * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + * + * @return The protoDescriptor. + */ + public com.google.protobuf.DescriptorProtos.DescriptorProto getProtoDescriptor() { + return protoDescriptor_ == null + ? com.google.protobuf.DescriptorProtos.DescriptorProto.getDefaultInstance() + : protoDescriptor_; + } + /** + * + * + *
+     * Message descriptor for the data. The descriptor has to be self-contained,
+     * including all the nested type definitions, except for protocol buffer
+     * well-known types
+     * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
+     * and zetasql public protos
+     * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
+     * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + public com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder + getProtoDescriptorOrBuilder() { + return getProtoDescriptor(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (hasProtoDescriptor()) { + if (!getProtoDescriptor().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (protoDescriptor_ != null) { + output.writeMessage(1, getProtoDescriptor()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (protoDescriptor_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getProtoDescriptor()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema other = + (com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema) obj; + + if (hasProtoDescriptor() != other.hasProtoDescriptor()) return false; + if (hasProtoDescriptor()) { + if (!getProtoDescriptor().equals(other.getProtoDescriptor())) return false; + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasProtoDescriptor()) { + hash = (37 * hash) + PROTO_DESCRIPTOR_FIELD_NUMBER; + hash = (53 * hash) + getProtoDescriptor().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema parseFrom( + byte[] data) throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+     * Protobuf schema is an API presentation of the proto buffer schema.
+     * 
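+     *
+     * Editor's sketch (not part of the generated sources): one assumed way to
+     * build a ProtoSchema from a caller-defined protobuf message class FooRow
+     * (a hypothetical name). It relies only on the setProtoDescriptor accessor
+     * generated below and the standard Descriptor.toProto() call, and assumes
+     * FooRow depends only on well-known types so the descriptor is self-contained:
+     *
+     *   ProtoBufProto.ProtoSchema schema =
+     *       ProtoBufProto.ProtoSchema.newBuilder()
+     *           // DescriptorProto describing the row message
+     *           .setProtoDescriptor(FooRow.getDescriptor().toProto())
+     *           .build();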
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.ProtoSchema} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.ProtoSchema) + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchemaOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema.class, + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + if (protoDescriptorBuilder_ == null) { + protoDescriptor_ = null; + } else { + protoDescriptor_ = null; + protoDescriptorBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoSchema_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema build() { + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema buildPartial() { + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema result = + new com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema(this); + if (protoDescriptorBuilder_ == null) { + result.protoDescriptor_ = protoDescriptor_; + } else { + result.protoDescriptor_ = protoDescriptorBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder 
clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema other) { + if (other + == com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema + .getDefaultInstance()) return this; + if (other.hasProtoDescriptor()) { + mergeProtoDescriptor(other.getProtoDescriptor()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + if (hasProtoDescriptor()) { + if (!getProtoDescriptor().isInitialized()) { + return false; + } + } + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private com.google.protobuf.DescriptorProtos.DescriptorProto protoDescriptor_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.DescriptorProtos.DescriptorProto, + com.google.protobuf.DescriptorProtos.DescriptorProto.Builder, + com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder> + protoDescriptorBuilder_; + /** + * + * + *
+       * Message descriptor for the data. The descriptor has to be self-contained
+       * and include all nested type definitions, except for proto buffer
+       * well-known types
+       * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
+       * and zetasql public protos
+       * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
+       * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + * + * @return Whether the protoDescriptor field is set. + */ + public boolean hasProtoDescriptor() { + return protoDescriptorBuilder_ != null || protoDescriptor_ != null; + } + /** + * + * + *
+       * Message descriptor for the data. The descriptor has to be self-contained
+       * and include all nested type definitions, except for proto buffer
+       * well-known types
+       * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
+       * and zetasql public protos
+       * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
+       * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + * + * @return The protoDescriptor. + */ + public com.google.protobuf.DescriptorProtos.DescriptorProto getProtoDescriptor() { + if (protoDescriptorBuilder_ == null) { + return protoDescriptor_ == null + ? com.google.protobuf.DescriptorProtos.DescriptorProto.getDefaultInstance() + : protoDescriptor_; + } else { + return protoDescriptorBuilder_.getMessage(); + } + } + /** + * + * + *
+       * Message descriptor for the data. The descriptor has to be self-contained
+       * and include all nested type definitions, except for proto buffer
+       * well-known types
+       * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
+       * and zetasql public protos
+       * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
+       * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + public Builder setProtoDescriptor( + com.google.protobuf.DescriptorProtos.DescriptorProto value) { + if (protoDescriptorBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + protoDescriptor_ = value; + onChanged(); + } else { + protoDescriptorBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+       * Message descriptor for the data. The descriptor has to be self-contained
+       * and include all nested type definitions, except for proto buffer
+       * well-known types
+       * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
+       * and zetasql public protos
+       * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
+       * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + public Builder setProtoDescriptor( + com.google.protobuf.DescriptorProtos.DescriptorProto.Builder builderForValue) { + if (protoDescriptorBuilder_ == null) { + protoDescriptor_ = builderForValue.build(); + onChanged(); + } else { + protoDescriptorBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+       * Message descriptor for the data. The descriptor has to be self-contained
+       * and include all nested type definitions, except for proto buffer
+       * well-known types
+       * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
+       * and zetasql public protos
+       * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
+       * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + public Builder mergeProtoDescriptor( + com.google.protobuf.DescriptorProtos.DescriptorProto value) { + if (protoDescriptorBuilder_ == null) { + if (protoDescriptor_ != null) { + protoDescriptor_ = + com.google.protobuf.DescriptorProtos.DescriptorProto.newBuilder(protoDescriptor_) + .mergeFrom(value) + .buildPartial(); + } else { + protoDescriptor_ = value; + } + onChanged(); + } else { + protoDescriptorBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+       * Message descriptor for the data. The descriptor has to be self-contained
+       * and include all nested type definitions, except for proto buffer
+       * well-known types
+       * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
+       * and zetasql public protos
+       * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
+       * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + public Builder clearProtoDescriptor() { + if (protoDescriptorBuilder_ == null) { + protoDescriptor_ = null; + onChanged(); + } else { + protoDescriptor_ = null; + protoDescriptorBuilder_ = null; + } + + return this; + } + /** + * + * + *
+       * Message descriptor for the data. The descriptor has to be self-contained
+       * and include all nested type definitions, except for proto buffer
+       * well-known types
+       * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
+       * and zetasql public protos
+       * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
+       * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + public com.google.protobuf.DescriptorProtos.DescriptorProto.Builder + getProtoDescriptorBuilder() { + + onChanged(); + return getProtoDescriptorFieldBuilder().getBuilder(); + } + /** + * + * + *
+       * Message descriptor for the data. The descriptor has to be self-contained
+       * and include all nested type definitions, except for proto buffer
+       * well-known types
+       * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
+       * and zetasql public protos
+       * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
+       * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + public com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder + getProtoDescriptorOrBuilder() { + if (protoDescriptorBuilder_ != null) { + return protoDescriptorBuilder_.getMessageOrBuilder(); + } else { + return protoDescriptor_ == null + ? com.google.protobuf.DescriptorProtos.DescriptorProto.getDefaultInstance() + : protoDescriptor_; + } + } + /** + * + * + *
+       * Message descriptor for the data. The descriptor has to be self-contained
+       * and include all nested type definitions, except for proto buffer
+       * well-known types
+       * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
+       * and zetasql public protos
+       * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
+       * 
+ * + * .google.protobuf.DescriptorProto proto_descriptor = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.DescriptorProtos.DescriptorProto, + com.google.protobuf.DescriptorProtos.DescriptorProto.Builder, + com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder> + getProtoDescriptorFieldBuilder() { + if (protoDescriptorBuilder_ == null) { + protoDescriptorBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.DescriptorProtos.DescriptorProto, + com.google.protobuf.DescriptorProtos.DescriptorProto.Builder, + com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder>( + getProtoDescriptor(), getParentForChildren(), isClean()); + protoDescriptor_ = null; + } + return protoDescriptorBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.ProtoSchema) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.ProtoSchema) + private static final com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema(); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ProtoSchema parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ProtoSchema(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface ProtoRowsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.ProtoRows) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * A sequence of rows serialized as a Protocol Buffer.
+     * See https://developers.google.com/protocol-buffers/docs/overview for more
+     * information on deserializing this field.
+     * 
+ * + * repeated bytes serialized_rows = 1; + * + * @return A list containing the serializedRows. + */ + java.util.List getSerializedRowsList(); + /** + * + * + *
+     * A sequence of rows serialized as a Protocol Buffer.
+     * See https://developers.google.com/protocol-buffers/docs/overview for more
+     * information on deserializing this field.
+     * 
+ * + * repeated bytes serialized_rows = 1; + * + * @return The count of serializedRows. + */ + int getSerializedRowsCount(); + /** + * + * + *
+     * A sequence of rows serialized as a Protocol Buffer.
+     * See https://developers.google.com/protocol-buffers/docs/overview for more
+     * information on deserializing this field.
+     * 
+ * + * repeated bytes serialized_rows = 1; + * + * @param index The index of the element to return. + * @return The serializedRows at the given index. + */ + com.google.protobuf.ByteString getSerializedRows(int index); + } + /** + * + * + *
+   * Protobuf rows.
+   * 
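+   *
+   * Editor's sketch (not part of the generated sources): one assumed way to
+   * populate this message, where FooRow is a hypothetical caller-defined
+   * protobuf message matching the ProtoSchema sent on the stream. It uses only
+   * the addSerializedRows accessor generated below and the standard
+   * Message.toByteString() call:
+   *
+   *   FooRow row = FooRow.newBuilder().build();   // hypothetical row message
+   *   ProtoBufProto.ProtoRows rows =
+   *       ProtoBufProto.ProtoRows.newBuilder()
+   *           // each entry is one row, serialized with the schema's descriptor
+   *           .addSerializedRows(row.toByteString())
+   *           .build();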
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.ProtoRows} + */ + public static final class ProtoRows extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.ProtoRows) + ProtoRowsOrBuilder { + private static final long serialVersionUID = 0L; + // Use ProtoRows.newBuilder() to construct. + private ProtoRows(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ProtoRows() { + serializedRows_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ProtoRows(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private ProtoRows( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + serializedRows_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + serializedRows_.add(input.readBytes()); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + serializedRows_ = java.util.Collections.unmodifiableList(serializedRows_); // C + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoRows_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoRows_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows.class, + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows.Builder.class); + } + + public static final int SERIALIZED_ROWS_FIELD_NUMBER = 1; + private java.util.List serializedRows_; + /** + * + * + *
+     * A sequence of rows serialized as a Protocol Buffer.
+     * See https://developers.google.com/protocol-buffers/docs/overview for more
+     * information on deserializing this field.
+     * 
+ * + * repeated bytes serialized_rows = 1; + * + * @return A list containing the serializedRows. + */ + public java.util.List getSerializedRowsList() { + return serializedRows_; + } + /** + * + * + *
+     * A sequence of rows serialized as a Protocol Buffer.
+     * See https://developers.google.com/protocol-buffers/docs/overview for more
+     * information on deserializing this field.
+     * 
+ * + * repeated bytes serialized_rows = 1; + * + * @return The count of serializedRows. + */ + public int getSerializedRowsCount() { + return serializedRows_.size(); + } + /** + * + * + *
+     * A sequence of rows serialized as a Protocol Buffer.
+     * See https://developers.google.com/protocol-buffers/docs/overview for more
+     * information on deserializing this field.
+     * 
+ * + * repeated bytes serialized_rows = 1; + * + * @param index The index of the element to return. + * @return The serializedRows at the given index. + */ + public com.google.protobuf.ByteString getSerializedRows(int index) { + return serializedRows_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < serializedRows_.size(); i++) { + output.writeBytes(1, serializedRows_.get(i)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < serializedRows_.size(); i++) { + dataSize += + com.google.protobuf.CodedOutputStream.computeBytesSizeNoTag(serializedRows_.get(i)); + } + size += dataSize; + size += 1 * getSerializedRowsList().size(); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows other = + (com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows) obj; + + if (!getSerializedRowsList().equals(other.getSerializedRowsList())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getSerializedRowsCount() > 0) { + hash = (37 * hash) + SERIALIZED_ROWS_FIELD_NUMBER; + hash = (53 * hash) + getSerializedRowsList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+     * Protobuf rows.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.ProtoRows} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.ProtoRows) + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRowsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoRows_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoRows_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows.class, + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + serializedRows_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto + .internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoRows_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows build() { + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows buildPartial() { + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows result = + new com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows(this); + int from_bitField0_ = bitField0_; + if (((bitField0_ & 0x00000001) != 0)) { + serializedRows_ = java.util.Collections.unmodifiableList(serializedRows_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.serializedRows_ = serializedRows_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder 
clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows other) { + if (other + == com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows + .getDefaultInstance()) return this; + if (!other.serializedRows_.isEmpty()) { + if (serializedRows_.isEmpty()) { + serializedRows_ = other.serializedRows_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureSerializedRowsIsMutable(); + serializedRows_.addAll(other.serializedRows_); + } + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int bitField0_; + + private java.util.List serializedRows_ = + java.util.Collections.emptyList(); + + private void ensureSerializedRowsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + serializedRows_ = + new java.util.ArrayList(serializedRows_); + bitField0_ |= 0x00000001; + } + } + /** + * + * + *
+       * A sequence of rows serialized as a Protocol Buffer.
+       * See https://developers.google.com/protocol-buffers/docs/overview for more
+       * information on deserializing this field.
+       * 
+ * + * repeated bytes serialized_rows = 1; + * + * @return A list containing the serializedRows. + */ + public java.util.List getSerializedRowsList() { + return ((bitField0_ & 0x00000001) != 0) + ? java.util.Collections.unmodifiableList(serializedRows_) + : serializedRows_; + } + /** + * + * + *
+       * A sequence of rows serialized as a Protocol Buffer.
+       * See https://developers.google.com/protocol-buffers/docs/overview for more
+       * information on deserializing this field.
+       * 
+ * + * repeated bytes serialized_rows = 1; + * + * @return The count of serializedRows. + */ + public int getSerializedRowsCount() { + return serializedRows_.size(); + } + /** + * + * + *
+       * A sequence of rows serialized as a Protocol Buffer.
+       * See https://developers.google.com/protocol-buffers/docs/overview for more
+       * information on deserializing this field.
+       * 
+ * + * repeated bytes serialized_rows = 1; + * + * @param index The index of the element to return. + * @return The serializedRows at the given index. + */ + public com.google.protobuf.ByteString getSerializedRows(int index) { + return serializedRows_.get(index); + } + /** + * + * + *
+       * A sequence of rows serialized as a Protocol Buffer.
+       * See https://developers.google.com/protocol-buffers/docs/overview for more
+       * information on deserializing this field.
+       * 
+ * + * repeated bytes serialized_rows = 1; + * + * @param index The index to set the value at. + * @param value The serializedRows to set. + * @return This builder for chaining. + */ + public Builder setSerializedRows(int index, com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSerializedRowsIsMutable(); + serializedRows_.set(index, value); + onChanged(); + return this; + } + /** + * + * + *
+       * A sequence of rows serialized as a Protocol Buffer.
+       * See https://developers.google.com/protocol-buffers/docs/overview for more
+       * information on deserializing this field.
+       * 
+ * + * repeated bytes serialized_rows = 1; + * + * @param value The serializedRows to add. + * @return This builder for chaining. + */ + public Builder addSerializedRows(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSerializedRowsIsMutable(); + serializedRows_.add(value); + onChanged(); + return this; + } + /** + * + * + *
+       * A sequence of rows serialized as a Protocol Buffer.
+       * See https://developers.google.com/protocol-buffers/docs/overview for more
+       * information on deserializing this field.
+       * 
+ * + * repeated bytes serialized_rows = 1; + * + * @param values The serializedRows to add. + * @return This builder for chaining. + */ + public Builder addAllSerializedRows( + java.lang.Iterable values) { + ensureSerializedRowsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, serializedRows_); + onChanged(); + return this; + } + /** + * + * + *
+       * A sequence of rows serialized as a Protocol Buffer.
+       * See https://developers.google.com/protocol-buffers/docs/overview for more
+       * information on deserializing this field.
+       * 
+ * + * repeated bytes serialized_rows = 1; + * + * @return This builder for chaining. + */ + public Builder clearSerializedRows() { + serializedRows_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.ProtoRows) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.ProtoRows) + private static final com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows(); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ProtoRows parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ProtoRows(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoSchema_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoSchema_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoRows_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoRows_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n5google/cloud/bigquery/storage/v1alpha2" + + "/protobuf.proto\022&google.cloud.bigquery.s" + + "torage.v1alpha2\032 google/protobuf/descrip" + + "tor.proto\"I\n\013ProtoSchema\022:\n\020proto_descri" + + "ptor\030\001 \001(\0132 .google.protobuf.DescriptorP" + + "roto\"$\n\tProtoRows\022\027\n\017serialized_rows\030\001 \003" + + "(\014B\212\001\n*com.google.cloud.bigquery.storage" + + ".v1alpha2B\rProtoBufProtoZMgoogle.golang." 
+ + "org/genproto/googleapis/cloud/bigquery/s" + + "torage/v1alpha2;storageb\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.protobuf.DescriptorProtos.getDescriptor(), + }); + internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoSchema_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoSchema_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoSchema_descriptor, + new java.lang.String[] { + "ProtoDescriptor", + }); + internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoRows_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoRows_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoRows_descriptor, + new java.lang.String[] { + "SerializedRows", + }); + com.google.protobuf.DescriptorProtos.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Storage.java b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Storage.java new file mode 100644 index 0000000000..e782eecd58 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Storage.java @@ -0,0 +1,8598 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha2/storage.proto + +package com.google.cloud.bigquery.storage.v1alpha2; + +public final class Storage { + private Storage() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + public interface CreateWriteStreamRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.CreateWriteStreamRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Required. Reference to the table to which the stream belongs, in the format
+     * of `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+     * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The parent. + */ + java.lang.String getParent(); + /** + * + * + *
+     * Required. Reference to the table to which the stream belongs, in the format
+     * of `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+     * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the writeStream field is set. + */ + boolean hasWriteStream(); + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The writeStream. + */ + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream getWriteStream(); + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStreamOrBuilder + getWriteStreamOrBuilder(); + } + /** + * + * + *
+   * Request message for `CreateWriteStream`.
+   * 
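+   *
+   * Editor's sketch (not part of the generated sources): an assumed way to
+   * build this request with the generated builders; the table path and the
+   * WriteStream contents are placeholders, and setParent/setWriteStream are
+   * the standard accessors protoc generates for these two fields:
+   *
+   *   Storage.CreateWriteStreamRequest request =
+   *       Storage.CreateWriteStreamRequest.newBuilder()
+   *           // parent table in projects/{project}/datasets/{dataset}/tables/{table} form
+   *           .setParent("projects/my-project/datasets/my_dataset/tables/my_table")
+   *           .setWriteStream(Stream.WriteStream.newBuilder().build())
+   *           .build();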
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.CreateWriteStreamRequest} + */ + public static final class CreateWriteStreamRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.CreateWriteStreamRequest) + CreateWriteStreamRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use CreateWriteStreamRequest.newBuilder() to construct. + private CreateWriteStreamRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private CreateWriteStreamRequest() { + parent_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new CreateWriteStreamRequest(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private CreateWriteStreamRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + parent_ = s; + break; + } + case 18: + { + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Builder subBuilder = + null; + if (writeStream_ != null) { + subBuilder = writeStream_.toBuilder(); + } + writeStream_ = + input.readMessage( + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(writeStream_); + writeStream_ = subBuilder.buildPartial(); + } + + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_CreateWriteStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_CreateWriteStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest.class, + com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest.Builder + .class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + private volatile java.lang.Object parent_; + /** + * + * + *
+     * Required. Reference to the table to which the stream belongs, in the format
+     * of `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+     * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + /** + * + * + *
+     * Required. Reference to the table to which the stream belongs, in the format
+     * of `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+     * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int WRITE_STREAM_FIELD_NUMBER = 2; + private com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream writeStream_; + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the writeStream field is set. + */ + public boolean hasWriteStream() { + return writeStream_ != null; + } + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The writeStream. + */ + public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream getWriteStream() { + return writeStream_ == null + ? com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.getDefaultInstance() + : writeStream_; + } + /** + * + * + *
+     * Required. Stream to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStreamOrBuilder + getWriteStreamOrBuilder() { + return getWriteStream(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!getParentBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + if (writeStream_ != null) { + output.writeMessage(2, getWriteStream()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getParentBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + if (writeStream_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getWriteStream()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest other = + (com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (hasWriteStream() != other.hasWriteStream()) return false; + if (hasWriteStream()) { + if (!getWriteStream().equals(other.getWriteStream())) return false; + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (hasWriteStream()) { + hash = (37 * hash) + WRITE_STREAM_FIELD_NUMBER; + hash = (53 * hash) + getWriteStream().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest + parseFrom( + com.google.protobuf.ByteString data, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+     * Request message for `CreateWriteStream`.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.CreateWriteStreamRequest} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.CreateWriteStreamRequest) + com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_CreateWriteStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_CreateWriteStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest.class, + com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + parent_ = ""; + + if (writeStreamBuilder_ == null) { + writeStream_ = null; + } else { + writeStream_ = null; + writeStreamBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_CreateWriteStreamRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest build() { + com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest + buildPartial() { + com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest result = + new com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest(this); + result.parent_ = parent_; + if (writeStreamBuilder_ == null) { + result.writeStream_ = writeStream_; + } else { + result.writeStream_ = writeStreamBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder 
clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest + .getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + onChanged(); + } + if (other.hasWriteStream()) { + mergeWriteStream(other.getWriteStream()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest parsedMessage = + null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object parent_ = ""; + /** + * + * + *
+       * Required. Reference to the table to which the stream belongs, in the format
+       * of `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+       * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+       * Required. Reference to the table to which the stream belongs, in the format
+       * of `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+       * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+       * Required. Reference to the table to which the stream belongs, in the format
+       * of `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+       * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + parent_ = value; + onChanged(); + return this; + } + /** + * + * + *
+       * Required. Reference to the table to which the stream belongs, in the format
+       * of `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+       * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearParent() { + + parent_ = getDefaultInstance().getParent(); + onChanged(); + return this; + } + /** + * + * + *
+       * Required. Reference to the table to which the stream belongs, in the format
+       * of `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+       * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + parent_ = value; + onChanged(); + return this; + } + + private com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream writeStream_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream, + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Builder, + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStreamOrBuilder> + writeStreamBuilder_; + /** + * + * + *
+       * Required. Stream to be created.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the writeStream field is set. + */ + public boolean hasWriteStream() { + return writeStreamBuilder_ != null || writeStream_ != null; + } + /** + * + * + *
+       * Required. Stream to be created.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The writeStream. + */ + public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream getWriteStream() { + if (writeStreamBuilder_ == null) { + return writeStream_ == null + ? com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.getDefaultInstance() + : writeStream_; + } else { + return writeStreamBuilder_.getMessage(); + } + } + /** + * + * + *
+       * Required. Stream to be created.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setWriteStream( + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream value) { + if (writeStreamBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + writeStream_ = value; + onChanged(); + } else { + writeStreamBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+       * Required. Stream to be created.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setWriteStream( + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Builder builderForValue) { + if (writeStreamBuilder_ == null) { + writeStream_ = builderForValue.build(); + onChanged(); + } else { + writeStreamBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+       * Required. Stream to be created.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeWriteStream( + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream value) { + if (writeStreamBuilder_ == null) { + if (writeStream_ != null) { + writeStream_ = + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.newBuilder( + writeStream_) + .mergeFrom(value) + .buildPartial(); + } else { + writeStream_ = value; + } + onChanged(); + } else { + writeStreamBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+       * Required. Stream to be created.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearWriteStream() { + if (writeStreamBuilder_ == null) { + writeStream_ = null; + onChanged(); + } else { + writeStream_ = null; + writeStreamBuilder_ = null; + } + + return this; + } + /** + * + * + *
+       * Required. Stream to be created.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Builder + getWriteStreamBuilder() { + + onChanged(); + return getWriteStreamFieldBuilder().getBuilder(); + } + /** + * + * + *
+       * Required. Stream to be created.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStreamOrBuilder + getWriteStreamOrBuilder() { + if (writeStreamBuilder_ != null) { + return writeStreamBuilder_.getMessageOrBuilder(); + } else { + return writeStream_ == null + ? com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.getDefaultInstance() + : writeStream_; + } + } + /** + * + * + *
+       * Required. Stream to be created.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream, + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Builder, + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStreamOrBuilder> + getWriteStreamFieldBuilder() { + if (writeStreamBuilder_ == null) { + writeStreamBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream, + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Builder, + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStreamOrBuilder>( + getWriteStream(), getParentForChildren(), isClean()); + writeStream_ = null; + } + return writeStreamBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.CreateWriteStreamRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.CreateWriteStreamRequest) + private static final com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest(); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateWriteStreamRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new CreateWriteStreamRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface AppendRowsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
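For orientation, here is a minimal usage sketch (not generated output, and not part of this patch's file contents) of how a caller could assemble a CreateWriteStreamRequest with the builder methods above; the project, dataset, and table names are illustrative placeholders.

    // Sketch only: building a CreateWriteStreamRequest with the generated builders.
    // The parent path is an illustrative placeholder following the documented
    // projects/{project_id}/datasets/{dataset_id}/tables/{table_id} format.
    import com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest;
    import com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream;

    class CreateWriteStreamRequestSketch {
      public static void main(String[] args) {
        CreateWriteStreamRequest request =
            CreateWriteStreamRequest.newBuilder()
                .setParent("projects/my-project/datasets/my_dataset/tables/my_table")
                // An empty WriteStream satisfies the required message field here;
                // a real caller would populate it before issuing the RPC.
                .setWriteStream(WriteStream.newBuilder().build())
                .build();
        System.out.println(request);
      }
    }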
+     * Required. The stream that is the target of the append operation. This value must be
+     * specified for the initial request. If subsequent requests specify the
+     * stream name, it must be equal to the value provided in the first request.
+     * 
+ * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The writeStream. + */ + java.lang.String getWriteStream(); + /** + * + * + *
+     * Required. The stream that is the target of the append operation. This value must be
+     * specified for the initial request. If subsequent requests specify the
+     * stream name, it must be equal to the value provided in the first request.
+     * 
+ * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for writeStream. + */ + com.google.protobuf.ByteString getWriteStreamBytes(); + + /** + * + * + *
+     * Optional. If present, the write is only performed if the next append offset is the same
+     * as the provided value. If not present, the write is performed at the
+     * current end of stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the offset field is set. + */ + boolean hasOffset(); + /** + * + * + *
+     * Optional. If present, the write is only performed if the next append offset is the same
+     * as the provided value. If not present, the write is performed at the
+     * current end of stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The offset. + */ + com.google.protobuf.Int64Value getOffset(); + /** + * + * + *
+     * Optional. If present, the write is only performed if the next append offset is the same
+     * as the provided value. If not present, the write is performed at the
+     * current end of stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder(); + + /** + * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; + * + * + * @return Whether the protoRows field is set. + */ + boolean hasProtoRows(); + /** + * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; + * + * + * @return The protoRows. + */ + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData getProtoRows(); + /** + * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; + * + */ + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoDataOrBuilder + getProtoRowsOrBuilder(); + + public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.RowsCase + getRowsCase(); + } + /** + * + * + *
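Likewise, a hedged sketch of assembling an AppendRowsRequest whose rows oneof carries ProtoData, using the fields documented in the interface above. The stream name and offset are placeholders, the schema and rows payloads are left as empty defaults rather than guessing at ProtoSchema/ProtoRows internals, and the setWriteStream/setOffset/setProtoRows builder methods are the ones protoc conventionally generates for these fields.

    // Sketch only: building an AppendRowsRequest that carries ProtoData in the rows oneof.
    // The stream name and offset are placeholders; ProtoSchema and ProtoRows are left
    // as empty defaults rather than assuming their field layout.
    import com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto;
    import com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest;
    import com.google.protobuf.Int64Value;

    class AppendRowsRequestSketch {
      public static void main(String[] args) {
        AppendRowsRequest.ProtoData data =
            AppendRowsRequest.ProtoData.newBuilder()
                .setWriterSchema(ProtoBufProto.ProtoSchema.getDefaultInstance())
                .setRows(ProtoBufProto.ProtoRows.getDefaultInstance())
                .build();

        AppendRowsRequest request =
            AppendRowsRequest.newBuilder()
                .setWriteStream("projects/p/datasets/d/tables/t/streams/s")
                // Optional: append only if the next offset equals this value.
                .setOffset(Int64Value.newBuilder().setValue(0L).build())
                .setProtoRows(data)
                .build();
        System.out.println(request);
      }
    }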
+   * Request message for `AppendRows`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest} + */ + public static final class AppendRowsRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest) + AppendRowsRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use AppendRowsRequest.newBuilder() to construct. + private AppendRowsRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private AppendRowsRequest() { + writeStream_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new AppendRowsRequest(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private AppendRowsRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + writeStream_ = s; + break; + } + case 18: + { + com.google.protobuf.Int64Value.Builder subBuilder = null; + if (offset_ != null) { + subBuilder = offset_.toBuilder(); + } + offset_ = + input.readMessage(com.google.protobuf.Int64Value.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(offset_); + offset_ = subBuilder.buildPartial(); + } + + break; + } + case 34: + { + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + .Builder + subBuilder = null; + if (rowsCase_ == 4) { + subBuilder = + ((com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest + .ProtoData) + rows_) + .toBuilder(); + } + rows_ = + input.readMessage( + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest + .ProtoData.parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom( + (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest + .ProtoData) + rows_); + rows_ = subBuilder.buildPartial(); + } + rowsCase_ = 4; + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_fieldAccessorTable + 
.ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.class, + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.Builder.class); + } + + public interface ProtoDataOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+       * Proto schema used to serialize the data.
+       * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; + * + * @return Whether the writerSchema field is set. + */ + boolean hasWriterSchema(); + /** + * + * + *
+       * Proto schema used to serialize the data.
+       * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; + * + * @return The writerSchema. + */ + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema getWriterSchema(); + /** + * + * + *
+       * Proto schema used to serialize the data.
+       * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; + */ + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchemaOrBuilder + getWriterSchemaOrBuilder(); + + /** + * + * + *
+       * Serialized row data in protobuf message format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; + * + * @return Whether the rows field is set. + */ + boolean hasRows(); + /** + * + * + *
+       * Serialized row data in protobuf message format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; + * + * @return The rows. + */ + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows getRows(); + /** + * + * + *
+       * Serialized row data in protobuf message format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; + */ + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRowsOrBuilder + getRowsOrBuilder(); + } + /** Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData} */ + public static final class ProtoData extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData) + ProtoDataOrBuilder { + private static final long serialVersionUID = 0L; + // Use ProtoData.newBuilder() to construct. + private ProtoData(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ProtoData() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ProtoData(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private ProtoData( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema.Builder + subBuilder = null; + if (writerSchema_ != null) { + subBuilder = writerSchema_.toBuilder(); + } + writerSchema_ = + input.readMessage( + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema + .parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(writerSchema_); + writerSchema_ = subBuilder.buildPartial(); + } + + break; + } + case 18: + { + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows.Builder + subBuilder = null; + if (rows_ != null) { + subBuilder = rows_.toBuilder(); + } + rows_ = + input.readMessage( + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows + .parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(rows_); + rows_ = subBuilder.buildPartial(); + } + + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_ProtoData_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_ProtoData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + 
com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + .class, + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + .Builder.class); + } + + public static final int WRITER_SCHEMA_FIELD_NUMBER = 1; + private com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema writerSchema_; + /** + * + * + *
+       * Proto schema used to serialize the data.
+       * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; + * + * @return Whether the writerSchema field is set. + */ + public boolean hasWriterSchema() { + return writerSchema_ != null; + } + /** + * + * + *
+       * Proto schema used to serialize the data.
+       * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; + * + * @return The writerSchema. + */ + public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema + getWriterSchema() { + return writerSchema_ == null + ? com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema + .getDefaultInstance() + : writerSchema_; + } + /** + * + * + *
+       * Proto schema used to serialize the data.
+       * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; + */ + public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchemaOrBuilder + getWriterSchemaOrBuilder() { + return getWriterSchema(); + } + + public static final int ROWS_FIELD_NUMBER = 2; + private com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows rows_; + /** + * + * + *
+       * Serialized row data in protobuf message format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; + * + * @return Whether the rows field is set. + */ + public boolean hasRows() { + return rows_ != null; + } + /** + * + * + *
+       * Serialized row data in protobuf message format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; + * + * @return The rows. + */ + public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows getRows() { + return rows_ == null + ? com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows + .getDefaultInstance() + : rows_; + } + /** + * + * + *
+       * Serialized row data in protobuf message format.
+       * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; + */ + public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRowsOrBuilder + getRowsOrBuilder() { + return getRows(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (hasWriterSchema()) { + if (!getWriterSchema().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (writerSchema_ != null) { + output.writeMessage(1, getWriterSchema()); + } + if (rows_ != null) { + output.writeMessage(2, getRows()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (writerSchema_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getWriterSchema()); + } + if (rows_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getRows()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData other = + (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData) obj; + + if (hasWriterSchema() != other.hasWriterSchema()) return false; + if (hasWriterSchema()) { + if (!getWriterSchema().equals(other.getWriterSchema())) return false; + } + if (hasRows() != other.hasRows()) return false; + if (hasRows()) { + if (!getRows().equals(other.getRows())) return false; + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasWriterSchema()) { + hash = (37 * hash) + WRITER_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getWriterSchema().hashCode(); + } + if (hasRows()) { + hash = (37 * hash) + ROWS_FIELD_NUMBER; + hash = (53 * hash) + getRows().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData) + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoDataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_ProtoData_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_ProtoData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + .class, + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + .Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + if (writerSchemaBuilder_ == null) { + writerSchema_ = null; + } else { + writerSchema_ = null; + writerSchemaBuilder_ = null; + } + if (rowsBuilder_ == null) { + rows_ = null; + } else { + rows_ = null; + rowsBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_ProtoData_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + build() { + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + buildPartial() { + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData result = + new com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData( + this); + if (writerSchemaBuilder_ == null) { + result.writerSchema_ = writerSchema_; + } else { + result.writerSchema_ = writerSchemaBuilder_.build(); + } + if (rowsBuilder_ == 
null) { + result.rows_ = rows_; + } else { + result.rows_ = rowsBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData) + other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData other) { + if (other + == com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + .getDefaultInstance()) return this; + if (other.hasWriterSchema()) { + mergeWriterSchema(other.getWriterSchema()); + } + if (other.hasRows()) { + mergeRows(other.getRows()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + if (hasWriterSchema()) { + if (!getWriterSchema().isInitialized()) { + return false; + } + } + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema writerSchema_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema, + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema.Builder, + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchemaOrBuilder> + writerSchemaBuilder_; + /** + * + * + *
+         * Proto schema used to serialize the data.
+         * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; + * + * @return Whether the writerSchema field is set. + */ + public boolean hasWriterSchema() { + return writerSchemaBuilder_ != null || writerSchema_ != null; + } + /** + * + * + *
+         * Proto schema used to serialize the data.
+         * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; + * + * @return The writerSchema. + */ + public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema + getWriterSchema() { + if (writerSchemaBuilder_ == null) { + return writerSchema_ == null + ? com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema + .getDefaultInstance() + : writerSchema_; + } else { + return writerSchemaBuilder_.getMessage(); + } + } + /** + * + * + *
+         * Proto schema used to serialize the data.
+         * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; + */ + public Builder setWriterSchema( + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema value) { + if (writerSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + writerSchema_ = value; + onChanged(); + } else { + writerSchemaBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+         * Proto schema used to serialize the data.
+         * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; + */ + public Builder setWriterSchema( + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema.Builder + builderForValue) { + if (writerSchemaBuilder_ == null) { + writerSchema_ = builderForValue.build(); + onChanged(); + } else { + writerSchemaBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+         * Proto schema used to serialize the data.
+         * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; + */ + public Builder mergeWriterSchema( + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema value) { + if (writerSchemaBuilder_ == null) { + if (writerSchema_ != null) { + writerSchema_ = + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema.newBuilder( + writerSchema_) + .mergeFrom(value) + .buildPartial(); + } else { + writerSchema_ = value; + } + onChanged(); + } else { + writerSchemaBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+         * Proto schema used to serialize the data.
+         * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; + */ + public Builder clearWriterSchema() { + if (writerSchemaBuilder_ == null) { + writerSchema_ = null; + onChanged(); + } else { + writerSchema_ = null; + writerSchemaBuilder_ = null; + } + + return this; + } + /** + * + * + *
+         * Proto schema used to serialize the data.
+         * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; + */ + public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema.Builder + getWriterSchemaBuilder() { + + onChanged(); + return getWriterSchemaFieldBuilder().getBuilder(); + } + /** + * + * + *
+         * Proto schema used to serialize the data.
+         * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; + */ + public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchemaOrBuilder + getWriterSchemaOrBuilder() { + if (writerSchemaBuilder_ != null) { + return writerSchemaBuilder_.getMessageOrBuilder(); + } else { + return writerSchema_ == null + ? com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema + .getDefaultInstance() + : writerSchema_; + } + } + /** + * + * + *
+         * Proto schema used to serialize the data.
+         * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema, + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema.Builder, + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchemaOrBuilder> + getWriterSchemaFieldBuilder() { + if (writerSchemaBuilder_ == null) { + writerSchemaBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema, + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema.Builder, + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchemaOrBuilder>( + getWriterSchema(), getParentForChildren(), isClean()); + writerSchema_ = null; + } + return writerSchemaBuilder_; + } + + private com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows rows_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows, + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows.Builder, + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRowsOrBuilder> + rowsBuilder_; + /** + * + * + *
+         * Serialized row data in protobuf message format.
+         * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; + * + * @return Whether the rows field is set. + */ + public boolean hasRows() { + return rowsBuilder_ != null || rows_ != null; + } + /** + * + * + *
+         * Serialized row data in protobuf message format.
+         * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; + * + * @return The rows. + */ + public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows getRows() { + if (rowsBuilder_ == null) { + return rows_ == null + ? com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows + .getDefaultInstance() + : rows_; + } else { + return rowsBuilder_.getMessage(); + } + } + /** + * + * + *
+         * Serialized row data in protobuf message format.
+         * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; + */ + public Builder setRows( + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows value) { + if (rowsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + rows_ = value; + onChanged(); + } else { + rowsBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+         * Serialized row data in protobuf message format.
+         * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; + */ + public Builder setRows( + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows.Builder + builderForValue) { + if (rowsBuilder_ == null) { + rows_ = builderForValue.build(); + onChanged(); + } else { + rowsBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+         * Serialized row data in protobuf message format.
+         * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; + */ + public Builder mergeRows( + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows value) { + if (rowsBuilder_ == null) { + if (rows_ != null) { + rows_ = + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows.newBuilder( + rows_) + .mergeFrom(value) + .buildPartial(); + } else { + rows_ = value; + } + onChanged(); + } else { + rowsBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+         * Serialized row data in protobuf message format.
+         * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; + */ + public Builder clearRows() { + if (rowsBuilder_ == null) { + rows_ = null; + onChanged(); + } else { + rows_ = null; + rowsBuilder_ = null; + } + + return this; + } + /** + * + * + *
+         * Serialized row data in protobuf message format.
+         * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; + */ + public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows.Builder + getRowsBuilder() { + + onChanged(); + return getRowsFieldBuilder().getBuilder(); + } + /** + * + * + *
+         * Serialized row data in protobuf message format.
+         * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; + */ + public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRowsOrBuilder + getRowsOrBuilder() { + if (rowsBuilder_ != null) { + return rowsBuilder_.getMessageOrBuilder(); + } else { + return rows_ == null + ? com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows + .getDefaultInstance() + : rows_; + } + } + /** + * + * + *
+         * Serialized row data in protobuf message format.
+         * 
+ * + * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows, + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows.Builder, + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRowsOrBuilder> + getRowsFieldBuilder() { + if (rowsBuilder_ == null) { + rowsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows, + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows.Builder, + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRowsOrBuilder>( + getRows(), getParentForChildren(), isClean()); + rows_ = null; + } + return rowsBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData) + private static final com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest + .ProtoData + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData(); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ProtoData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ProtoData(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int rowsCase_ = 0; + private java.lang.Object rows_; + + public enum RowsCase implements com.google.protobuf.Internal.EnumLite { + PROTO_ROWS(4), + ROWS_NOT_SET(0); + private final int value; + + private RowsCase(int value) { + this.value = value; + } + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static RowsCase valueOf(int value) { + return forNumber(value); + } + + public static RowsCase forNumber(int value) { + switch (value) { + case 4: + return PROTO_ROWS; + case 0: + return ROWS_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public RowsCase getRowsCase() { + return RowsCase.forNumber(rowsCase_); + } + + public static final int WRITE_STREAM_FIELD_NUMBER = 1; + private volatile java.lang.Object writeStream_; + /** + * + * + *
+     * Required. The stream that is the target of the append operation. This value must be
+     * specified for the initial request. If subsequent requests specify the
+     * stream name, it must be equal to the value provided in the first request.
+     * 
+ * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The writeStream. + */ + public java.lang.String getWriteStream() { + java.lang.Object ref = writeStream_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + writeStream_ = s; + return s; + } + } + /** + * + * + *
+     * Required. The stream that is the target of the append operation. This value must be
+     * specified for the initial request. If subsequent requests specify the
+     * stream name, it must be equal to the value provided in the first request.
+     * 
+ * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for writeStream. + */ + public com.google.protobuf.ByteString getWriteStreamBytes() { + java.lang.Object ref = writeStream_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + writeStream_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int OFFSET_FIELD_NUMBER = 2; + private com.google.protobuf.Int64Value offset_; + /** + * + * + *
+     * Optional. If present, the write is only performed if the next append offset is the same
+     * as the provided value. If not present, the write is performed at the
+     * current end of stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the offset field is set. + */ + public boolean hasOffset() { + return offset_ != null; + } + /** + * + * + *
+     * Optional. If present, the write is only performed if the next append offset is the same
+     * as the provided value. If not present, the write is performed at the
+     * current end of stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The offset. + */ + public com.google.protobuf.Int64Value getOffset() { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } + /** + * + * + *
+     * Optional. If present, the write is only performed if the next append offset is the same
+     * as the provided value. If not present, the write is performed at the
+     * current end of stream.
+     * 
+ * + * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder() { + return getOffset(); + } + + public static final int PROTO_ROWS_FIELD_NUMBER = 4; + /** + * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; + * + * + * @return Whether the protoRows field is set. + */ + public boolean hasProtoRows() { + return rowsCase_ == 4; + } + /** + * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; + * + * + * @return The protoRows. + */ + public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + getProtoRows() { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData) + rows_; + } + return com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + .getDefaultInstance(); + } + /** + * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; + * + */ + public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoDataOrBuilder + getProtoRowsOrBuilder() { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData) + rows_; + } + return com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + .getDefaultInstance(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + if (rowsCase_ == 4) { + if (!getProtoRows().isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!getWriteStreamBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, writeStream_); + } + if (offset_ != null) { + output.writeMessage(2, getOffset()); + } + if (rowsCase_ == 4) { + output.writeMessage( + 4, + (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData) rows_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getWriteStreamBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, writeStream_); + } + if (offset_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getOffset()); + } + if (rowsCase_ == 4) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, + (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData) + rows_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest other = + (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest) obj; + + if (!getWriteStream().equals(other.getWriteStream())) return false; + if (hasOffset() != other.hasOffset()) return false; + if (hasOffset()) { + if 
(!getOffset().equals(other.getOffset())) return false; + } + if (!getRowsCase().equals(other.getRowsCase())) return false; + switch (rowsCase_) { + case 4: + if (!getProtoRows().equals(other.getProtoRows())) return false; + break; + case 0: + default: + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + WRITE_STREAM_FIELD_NUMBER; + hash = (53 * hash) + getWriteStream().hashCode(); + if (hasOffset()) { + hash = (37 * hash) + OFFSET_FIELD_NUMBER; + hash = (53 * hash) + getOffset().hashCode(); + } + switch (rowsCase_) { + case 4: + hash = (37 * hash) + PROTO_ROWS_FIELD_NUMBER; + hash = (53 * hash) + getProtoRows().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest + parseDelimitedFrom( + java.io.InputStream 
input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+     * Request message for `AppendRows`.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest) + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.class, + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + writeStream_ = ""; + + if (offsetBuilder_ == null) { + offset_ = null; + } else { + offset_ = null; + offsetBuilder_ = null; + } + rowsCase_ = 0; + rows_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest build() { + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest buildPartial() { + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest result = + new com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest(this); + result.writeStream_ = writeStream_; + if (offsetBuilder_ == null) { + result.offset_ = offset_; + } else { + result.offset_ = offsetBuilder_.build(); + } + if (rowsCase_ == 4) { + if (protoRowsBuilder_ == null) { + result.rows_ = rows_; + } else { + result.rows_ = protoRowsBuilder_.build(); + } + } + result.rowsCase_ = rowsCase_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + 
public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest + .getDefaultInstance()) return this; + if (!other.getWriteStream().isEmpty()) { + writeStream_ = other.writeStream_; + onChanged(); + } + if (other.hasOffset()) { + mergeOffset(other.getOffset()); + } + switch (other.getRowsCase()) { + case PROTO_ROWS: + { + mergeProtoRows(other.getProtoRows()); + break; + } + case ROWS_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + if (rowsCase_ == 4) { + if (!getProtoRows().isInitialized()) { + return false; + } + } + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int rowsCase_ = 0; + private java.lang.Object rows_; + + public RowsCase getRowsCase() { + return RowsCase.forNumber(rowsCase_); + } + + public Builder clearRows() { + rowsCase_ = 0; + rows_ = null; + onChanged(); + return this; + } + + private java.lang.Object writeStream_ = ""; + /** + * + * + *
+       * Required. The stream that is the target of the append operation. This value must be
+       * specified for the initial request. If subsequent requests specify the
+       * stream name, it must be equal to the value provided in the first request.
+       * 
+ * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The writeStream. + */ + public java.lang.String getWriteStream() { + java.lang.Object ref = writeStream_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + writeStream_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+       * Required. The stream that is the target of the append operation. This value must be
+       * specified for the initial request. If subsequent requests specify the
+       * stream name, it must be equal to the value provided in the first request.
+       * 
+ * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for writeStream. + */ + public com.google.protobuf.ByteString getWriteStreamBytes() { + java.lang.Object ref = writeStream_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + writeStream_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+       * Required. The stream that is the target of the append operation. This value must be
+       * specified for the initial request. If subsequent requests specify the
+       * stream name, it must be equal to the value provided in the first request.
+       * 
+ * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The writeStream to set. + * @return This builder for chaining. + */ + public Builder setWriteStream(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + writeStream_ = value; + onChanged(); + return this; + } + /** + * + * + *
+       * Required. The stream that is the target of the append operation. This value must be
+       * specified for the initial request. If subsequent requests specify the
+       * stream name, it must be equal to the value provided in the first request.
+       * 
+ * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearWriteStream() { + + writeStream_ = getDefaultInstance().getWriteStream(); + onChanged(); + return this; + } + /** + * + * + *
+       * Required. The stream that is the target of the append operation. This value must be
+       * specified for the initial request. If subsequent requests specify the
+       * stream name, it must be equal to the value provided in the first request.
+       * 
+ * + * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for writeStream to set. + * @return This builder for chaining. + */ + public Builder setWriteStreamBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + writeStream_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.Int64Value offset_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder> + offsetBuilder_; + /** + * + * + *
+       * Optional. If present, the write is only performed if the next append offset is the same
+       * as the provided value. If not present, the write is performed at the
+       * current end of stream.
+       * 
+ * + * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the offset field is set. + */ + public boolean hasOffset() { + return offsetBuilder_ != null || offset_ != null; + } + /** + * + * + *
+       * Optional. If present, the write is only performed if the next append offset is the same
+       * as the provided value. If not present, the write is performed at the
+       * current end of stream.
+       * 
+ * + * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The offset. + */ + public com.google.protobuf.Int64Value getOffset() { + if (offsetBuilder_ == null) { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } else { + return offsetBuilder_.getMessage(); + } + } + /** + * + * + *
+       * Optional. If present, the write is only performed if the next append offset is the same
+       * as the provided value. If not present, the write is performed at the
+       * current end of stream.
+       * 
+ * + * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setOffset(com.google.protobuf.Int64Value value) { + if (offsetBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + offset_ = value; + onChanged(); + } else { + offsetBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+       * Optional. If present, the write is only performed if the next append offset is the same
+       * as the provided value. If not present, the write is performed at the
+       * current end of stream.
+       * 
+ * + * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setOffset(com.google.protobuf.Int64Value.Builder builderForValue) { + if (offsetBuilder_ == null) { + offset_ = builderForValue.build(); + onChanged(); + } else { + offsetBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+       * Optional. If present, the write is only performed if the next append offset is the same
+       * as the provided value. If not present, the write is performed at the
+       * current end of stream.
+       * 
+ * + * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeOffset(com.google.protobuf.Int64Value value) { + if (offsetBuilder_ == null) { + if (offset_ != null) { + offset_ = + com.google.protobuf.Int64Value.newBuilder(offset_).mergeFrom(value).buildPartial(); + } else { + offset_ = value; + } + onChanged(); + } else { + offsetBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+       * Optional. If present, the write is only performed if the next append offset is the same
+       * as the provided value. If not present, the write is performed at the
+       * current end of stream.
+       * 
+ * + * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearOffset() { + if (offsetBuilder_ == null) { + offset_ = null; + onChanged(); + } else { + offset_ = null; + offsetBuilder_ = null; + } + + return this; + } + /** + * + * + *
+       * Optional. If present, the write is only performed if the next append offset is the same
+       * as the provided value. If not present, the write is performed at the
+       * current end of stream.
+       * 
+ * + * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.Int64Value.Builder getOffsetBuilder() { + + onChanged(); + return getOffsetFieldBuilder().getBuilder(); + } + /** + * + * + *
+       * Optional. If present, the write is only performed if the next append offset is the same
+       * as the provided value. If not present, the write is performed at the
+       * current end of stream.
+       * 
+ * + * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder() { + if (offsetBuilder_ != null) { + return offsetBuilder_.getMessageOrBuilder(); + } else { + return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; + } + } + /** + * + * + *
+       * Optional. If present, the write is only performed if the next append offset is the same
+       * as the provided value. If not present, the write is performed at the
+       * current end of stream.
+       * 
+ * + * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder> + getOffsetFieldBuilder() { + if (offsetBuilder_ == null) { + offsetBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Int64Value, + com.google.protobuf.Int64Value.Builder, + com.google.protobuf.Int64ValueOrBuilder>( + getOffset(), getParentForChildren(), isClean()); + offset_ = null; + } + return offsetBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData, + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + .Builder, + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest + .ProtoDataOrBuilder> + protoRowsBuilder_; + /** + * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; + * + * + * @return Whether the protoRows field is set. + */ + public boolean hasProtoRows() { + return rowsCase_ == 4; + } + /** + * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; + * + * + * @return The protoRows. + */ + public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + getProtoRows() { + if (protoRowsBuilder_ == null) { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData) + rows_; + } + return com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + .getDefaultInstance(); + } else { + if (rowsCase_ == 4) { + return protoRowsBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + .getDefaultInstance(); + } + } + /** + * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; + * + */ + public Builder setProtoRows( + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData value) { + if (protoRowsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + rows_ = value; + onChanged(); + } else { + protoRowsBuilder_.setMessage(value); + } + rowsCase_ = 4; + return this; + } + /** + * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; + * + */ + public Builder setProtoRows( + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData.Builder + builderForValue) { + if (protoRowsBuilder_ == null) { + rows_ = builderForValue.build(); + onChanged(); + } else { + protoRowsBuilder_.setMessage(builderForValue.build()); + } + rowsCase_ = 4; + return this; + } + /** + * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; + * + */ + public Builder mergeProtoRows( + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData value) { + if (protoRowsBuilder_ == null) { + if (rowsCase_ == 4 + && rows_ + != com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + .getDefaultInstance()) { + rows_ = + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + .newBuilder( + (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest + .ProtoData) + rows_) + .mergeFrom(value) + .buildPartial(); + } else { + rows_ = value; + } + onChanged(); + } else { + if (rowsCase_ == 4) { + protoRowsBuilder_.mergeFrom(value); + } + 
protoRowsBuilder_.setMessage(value); + } + rowsCase_ = 4; + return this; + } + /** + * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; + * + */ + public Builder clearProtoRows() { + if (protoRowsBuilder_ == null) { + if (rowsCase_ == 4) { + rowsCase_ = 0; + rows_ = null; + onChanged(); + } + } else { + if (rowsCase_ == 4) { + rowsCase_ = 0; + rows_ = null; + } + protoRowsBuilder_.clear(); + } + return this; + } + /** + * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; + * + */ + public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData.Builder + getProtoRowsBuilder() { + return getProtoRowsFieldBuilder().getBuilder(); + } + /** + * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; + * + */ + public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoDataOrBuilder + getProtoRowsOrBuilder() { + if ((rowsCase_ == 4) && (protoRowsBuilder_ != null)) { + return protoRowsBuilder_.getMessageOrBuilder(); + } else { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData) + rows_; + } + return com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + .getDefaultInstance(); + } + } + /** + * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData, + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + .Builder, + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest + .ProtoDataOrBuilder> + getProtoRowsFieldBuilder() { + if (protoRowsBuilder_ == null) { + if (!(rowsCase_ == 4)) { + rows_ = + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + .getDefaultInstance(); + } + protoRowsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData, + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData + .Builder, + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest + .ProtoDataOrBuilder>( + (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData) + rows_, + getParentForChildren(), + isClean()); + rows_ = null; + } + rowsCase_ = 4; + onChanged(); + ; + return protoRowsBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest) + private static final com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest(); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new 
com.google.protobuf.AbstractParser() { + @java.lang.Override + public AppendRowsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AppendRowsRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface AppendRowsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.AppendRowsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * The row offset at which the last append occurred.
+     * 
+ * + * int64 offset = 1; + * + * @return The offset. + */ + long getOffset(); + + /** + * + * + *
+     * Error in case of append failure. If set, it means rows are not accepted
+     * into the system. Users can retry within the same connection.
+     * 
+ * + * .google.rpc.Status error = 2; + * + * @return Whether the error field is set. + */ + boolean hasError(); + /** + * + * + *
+     * Error in case of append failure. If set, it means rows are not accepted
+     * into the system. Users can retry within the same connection.
+     * 
+ * + * .google.rpc.Status error = 2; + * + * @return The error. + */ + com.google.rpc.Status getError(); + /** + * + * + *
+     * Error in case of append failure. If set, it means rows are not accepted
+     * into the system. Users can retry within the same connection.
+     * 
+ * + * .google.rpc.Status error = 2; + */ + com.google.rpc.StatusOrBuilder getErrorOrBuilder(); + + public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse.ResponseCase + getResponseCase(); + } + /** + * + * + *
+   * Response message for `AppendRows`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.AppendRowsResponse} + */ + public static final class AppendRowsResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.AppendRowsResponse) + AppendRowsResponseOrBuilder { + private static final long serialVersionUID = 0L; + // Use AppendRowsResponse.newBuilder() to construct. + private AppendRowsResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private AppendRowsResponse() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new AppendRowsResponse(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private AppendRowsResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + responseCase_ = 1; + response_ = input.readInt64(); + break; + } + case 18: + { + com.google.rpc.Status.Builder subBuilder = null; + if (responseCase_ == 2) { + subBuilder = ((com.google.rpc.Status) response_).toBuilder(); + } + response_ = input.readMessage(com.google.rpc.Status.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.rpc.Status) response_); + response_ = subBuilder.buildPartial(); + } + responseCase_ = 2; + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse.class, + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse.Builder.class); + } + + private int responseCase_ = 0; + private java.lang.Object response_; + + public enum ResponseCase implements com.google.protobuf.Internal.EnumLite { + OFFSET(1), + ERROR(2), + RESPONSE_NOT_SET(0); + private final int value; + + private ResponseCase(int value) { + this.value = value; + } + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. 
+ * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ResponseCase valueOf(int value) { + return forNumber(value); + } + + public static ResponseCase forNumber(int value) { + switch (value) { + case 1: + return OFFSET; + case 2: + return ERROR; + case 0: + return RESPONSE_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public ResponseCase getResponseCase() { + return ResponseCase.forNumber(responseCase_); + } + + public static final int OFFSET_FIELD_NUMBER = 1; + /** + * + * + *
+     * The row offset at which the last append occurred.
+     * 
+ * + * int64 offset = 1; + * + * @return The offset. + */ + public long getOffset() { + if (responseCase_ == 1) { + return (java.lang.Long) response_; + } + return 0L; + } + + public static final int ERROR_FIELD_NUMBER = 2; + /** + * + * + *
+     * Error in case of append failure. If set, it means rows are not accepted
+     * into the system. Users can retry within the same connection.
+     * 
+ * + * .google.rpc.Status error = 2; + * + * @return Whether the error field is set. + */ + public boolean hasError() { + return responseCase_ == 2; + } + /** + * + * + *
+     * Error in case of append failure. If set, it means rows are not accepted
+     * into the system. Users can retry within the same connection.
+     * 
+ * + * .google.rpc.Status error = 2; + * + * @return The error. + */ + public com.google.rpc.Status getError() { + if (responseCase_ == 2) { + return (com.google.rpc.Status) response_; + } + return com.google.rpc.Status.getDefaultInstance(); + } + /** + * + * + *
+     * Error in case of append failure. If set, it means rows are not accepted
+     * into the system. Users can retry within the same connection.
+     * 
+ * + * .google.rpc.Status error = 2; + */ + public com.google.rpc.StatusOrBuilder getErrorOrBuilder() { + if (responseCase_ == 2) { + return (com.google.rpc.Status) response_; + } + return com.google.rpc.Status.getDefaultInstance(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (responseCase_ == 1) { + output.writeInt64(1, (long) ((java.lang.Long) response_)); + } + if (responseCase_ == 2) { + output.writeMessage(2, (com.google.rpc.Status) response_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (responseCase_ == 1) { + size += + com.google.protobuf.CodedOutputStream.computeInt64Size( + 1, (long) ((java.lang.Long) response_)); + } + if (responseCase_ == 2) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 2, (com.google.rpc.Status) response_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse other = + (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse) obj; + + if (!getResponseCase().equals(other.getResponseCase())) return false; + switch (responseCase_) { + case 1: + if (getOffset() != other.getOffset()) return false; + break; + case 2: + if (!getError().equals(other.getError())) return false; + break; + case 0: + default: + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + switch (responseCase_) { + case 1: + hash = (37 * hash) + OFFSET_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getOffset()); + break; + case 2: + hash = (37 * hash) + ERROR_FIELD_NUMBER; + hash = (53 * hash) + getError().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+     * Response message for `AppendRows`.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.AppendRowsResponse} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.AppendRowsResponse) + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse.class, + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + responseCase_ = 0; + response_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse build() { + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse buildPartial() { + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse result = + new com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse(this); + if (responseCase_ == 1) { + result.response_ = response_; + } + if (responseCase_ == 2) { + if (errorBuilder_ == null) { + result.response_ = response_; + } else { + result.response_ = errorBuilder_.build(); + } + } + result.responseCase_ = responseCase_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder 
clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse other) { + if (other + == com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse + .getDefaultInstance()) return this; + switch (other.getResponseCase()) { + case OFFSET: + { + setOffset(other.getOffset()); + break; + } + case ERROR: + { + mergeError(other.getError()); + break; + } + case RESPONSE_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int responseCase_ = 0; + private java.lang.Object response_; + + public ResponseCase getResponseCase() { + return ResponseCase.forNumber(responseCase_); + } + + public Builder clearResponse() { + responseCase_ = 0; + response_ = null; + onChanged(); + return this; + } + + /** + * + * + *
+       * The row offset at which the last append occurred.
+       * 
+ * + * int64 offset = 1; + * + * @return The offset. + */ + public long getOffset() { + if (responseCase_ == 1) { + return (java.lang.Long) response_; + } + return 0L; + } + /** + * + * + *
+       * The row offset at which the last append occurred.
+       * 
+ * + * int64 offset = 1; + * + * @param value The offset to set. + * @return This builder for chaining. + */ + public Builder setOffset(long value) { + responseCase_ = 1; + response_ = value; + onChanged(); + return this; + } + /** + * + * + *
+       * The row offset at which the last append occurred.
+       * 
+ * + * int64 offset = 1; + * + * @return This builder for chaining. + */ + public Builder clearOffset() { + if (responseCase_ == 1) { + responseCase_ = 0; + response_ = null; + onChanged(); + } + return this; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.rpc.Status, com.google.rpc.Status.Builder, com.google.rpc.StatusOrBuilder> + errorBuilder_; + /** + * + * + *
+       * Error in case of append failure. If set, it means rows are not accepted
+       * into the system. Users can retry within the same connection.
+       * 
+ * + * .google.rpc.Status error = 2; + * + * @return Whether the error field is set. + */ + public boolean hasError() { + return responseCase_ == 2; + } + /** + * + * + *
+       * Error in case of append failure. If set, it means rows are not accepted
+       * into the system. Users can retry within the same connection.
+       * 
+ * + * .google.rpc.Status error = 2; + * + * @return The error. + */ + public com.google.rpc.Status getError() { + if (errorBuilder_ == null) { + if (responseCase_ == 2) { + return (com.google.rpc.Status) response_; + } + return com.google.rpc.Status.getDefaultInstance(); + } else { + if (responseCase_ == 2) { + return errorBuilder_.getMessage(); + } + return com.google.rpc.Status.getDefaultInstance(); + } + } + /** + * + * + *
+       * Error in case of append failure. If set, it means rows are not accepted
+       * into the system. Users can retry within the same connection.
+       * 
+ * + * .google.rpc.Status error = 2; + */ + public Builder setError(com.google.rpc.Status value) { + if (errorBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + response_ = value; + onChanged(); + } else { + errorBuilder_.setMessage(value); + } + responseCase_ = 2; + return this; + } + /** + * + * + *
+       * Error in case of append failure. If set, it means rows are not accepted
+       * into the system. Users can retry within the same connection.
+       * 
+ * + * .google.rpc.Status error = 2; + */ + public Builder setError(com.google.rpc.Status.Builder builderForValue) { + if (errorBuilder_ == null) { + response_ = builderForValue.build(); + onChanged(); + } else { + errorBuilder_.setMessage(builderForValue.build()); + } + responseCase_ = 2; + return this; + } + /** + * + * + *
+       * Error in case of append failure. If set, it means rows are not accepted
+       * into the system. Users can retry within the same connection.
+       * 
+ * + * .google.rpc.Status error = 2; + */ + public Builder mergeError(com.google.rpc.Status value) { + if (errorBuilder_ == null) { + if (responseCase_ == 2 && response_ != com.google.rpc.Status.getDefaultInstance()) { + response_ = + com.google.rpc.Status.newBuilder((com.google.rpc.Status) response_) + .mergeFrom(value) + .buildPartial(); + } else { + response_ = value; + } + onChanged(); + } else { + if (responseCase_ == 2) { + errorBuilder_.mergeFrom(value); + } + errorBuilder_.setMessage(value); + } + responseCase_ = 2; + return this; + } + /** + * + * + *
+       * Error in case of append failure. If set, it means rows are not accepted
+       * into the system. Users can retry within the same connection.
+       * 
+ * + * .google.rpc.Status error = 2; + */ + public Builder clearError() { + if (errorBuilder_ == null) { + if (responseCase_ == 2) { + responseCase_ = 0; + response_ = null; + onChanged(); + } + } else { + if (responseCase_ == 2) { + responseCase_ = 0; + response_ = null; + } + errorBuilder_.clear(); + } + return this; + } + /** + * + * + *
+       * Error in case of append failure. If set, it means rows are not accepted
+       * into the system. Users can retry within the same connection.
+       * 
+ * + * .google.rpc.Status error = 2; + */ + public com.google.rpc.Status.Builder getErrorBuilder() { + return getErrorFieldBuilder().getBuilder(); + } + /** + * + * + *
+       * Error in case of append failure. If set, it means rows are not accepted
+       * into the system. Users can retry within the same connection.
+       * 
+ * + * .google.rpc.Status error = 2; + */ + public com.google.rpc.StatusOrBuilder getErrorOrBuilder() { + if ((responseCase_ == 2) && (errorBuilder_ != null)) { + return errorBuilder_.getMessageOrBuilder(); + } else { + if (responseCase_ == 2) { + return (com.google.rpc.Status) response_; + } + return com.google.rpc.Status.getDefaultInstance(); + } + } + /** + * + * + *
+       * Error in case of append failure. If set, it means rows are not accepted
+       * into the system. Users can retry within the same connection.
+       * 
+ * + * .google.rpc.Status error = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.rpc.Status, com.google.rpc.Status.Builder, com.google.rpc.StatusOrBuilder> + getErrorFieldBuilder() { + if (errorBuilder_ == null) { + if (!(responseCase_ == 2)) { + response_ = com.google.rpc.Status.getDefaultInstance(); + } + errorBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.rpc.Status, + com.google.rpc.Status.Builder, + com.google.rpc.StatusOrBuilder>( + (com.google.rpc.Status) response_, getParentForChildren(), isClean()); + response_ = null; + } + responseCase_ = 2; + onChanged(); + ; + return errorBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.AppendRowsResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.AppendRowsResponse) + private static final com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse(); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AppendRowsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AppendRowsResponse(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface GetWriteStreamRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.GetWriteStreamRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Required. Name of the stream to get, in the form of
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + java.lang.String getName(); + /** + * + * + *
+     * Required. Name of the stream to get, in the form of
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + } + /** + * + * + *
+   * Request message for `GetWriteStream`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.GetWriteStreamRequest} + */ + public static final class GetWriteStreamRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.GetWriteStreamRequest) + GetWriteStreamRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use GetWriteStreamRequest.newBuilder() to construct. + private GetWriteStreamRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private GetWriteStreamRequest() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new GetWriteStreamRequest(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private GetWriteStreamRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + name_ = s; + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_GetWriteStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_GetWriteStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest.class, + com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest.Builder + .class); + } + + public static final int NAME_FIELD_NUMBER = 1; + private volatile java.lang.Object name_; + /** + * + * + *
+     * Required. Name of the stream to get, in the form of
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * + * + *
+     * Required. Name of the stream to get, in the form of
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!getNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest other = + (com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+     * Request message for `GetWriteStream`.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.GetWriteStreamRequest} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.GetWriteStreamRequest) + com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_GetWriteStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_GetWriteStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest.class, + com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + name_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_GetWriteStreamRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest build() { + com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest + buildPartial() { + com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest result = + new com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest(this); + result.name_ = name_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int 
index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest + .getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest parsedMessage = + null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object name_ = ""; + /** + * + * + *
+       * Required. Name of the stream to get, in the form of
+       * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
+       * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+       * Required. Name of the stream to get, in the form of
+       * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
+       * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+       * Required. Name of the stream to get, in the form of
+       * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
+       * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + name_ = value; + onChanged(); + return this; + } + /** + * + * + *
+       * Required. Name of the stream to get, in the form of
+       * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
+       * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + * + * + *
+       * Required. Name of the stream to get, in the form of
+       * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
+       * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + name_ = value; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.GetWriteStreamRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.GetWriteStreamRequest) + private static final com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest(); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public GetWriteStreamRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetWriteStreamRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface BatchCommitWriteStreamsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Required. Parent table that all the streams should belong to, in the form of
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+     * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The parent. + */ + java.lang.String getParent(); + /** + * + * + *
+     * Required. Parent table that all the streams should belong to, in the form of
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+     * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the writeStreams. + */ + java.util.List getWriteStreamsList(); + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of writeStreams. + */ + int getWriteStreamsCount(); + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The writeStreams at the given index. + */ + java.lang.String getWriteStreams(int index); + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the writeStreams at the given index. + */ + com.google.protobuf.ByteString getWriteStreamsBytes(int index); + } + /** + * + * + *
+   * Request message for `BatchCommitWriteStreams`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsRequest} + */ + public static final class BatchCommitWriteStreamsRequest + extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsRequest) + BatchCommitWriteStreamsRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use BatchCommitWriteStreamsRequest.newBuilder() to construct. + private BatchCommitWriteStreamsRequest( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private BatchCommitWriteStreamsRequest() { + parent_ = ""; + writeStreams_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new BatchCommitWriteStreamsRequest(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private BatchCommitWriteStreamsRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + parent_ = s; + break; + } + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + writeStreams_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + writeStreams_.add(s); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + writeStreams_ = writeStreams_.getUnmodifiableView(); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest + .class, + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest + .Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + private volatile java.lang.Object parent_; + /** + * + * + *
+     * Required. Parent table that all the streams should belong to, in the form of
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+     * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + /** + * + * + *
+     * Required. Parent table that all the streams should belong to, in the form of
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+     * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int WRITE_STREAMS_FIELD_NUMBER = 2; + private com.google.protobuf.LazyStringList writeStreams_; + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the writeStreams. + */ + public com.google.protobuf.ProtocolStringList getWriteStreamsList() { + return writeStreams_; + } + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of writeStreams. + */ + public int getWriteStreamsCount() { + return writeStreams_.size(); + } + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The writeStreams at the given index. + */ + public java.lang.String getWriteStreams(int index) { + return writeStreams_.get(index); + } + /** + * + * + *
+     * Required. The group of streams that will be committed atomically.
+     * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the value to return. + * @return The bytes of the writeStreams at the given index. + */ + public com.google.protobuf.ByteString getWriteStreamsBytes(int index) { + return writeStreams_.getByteString(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!getParentBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + for (int i = 0; i < writeStreams_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, writeStreams_.getRaw(i)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getParentBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + { + int dataSize = 0; + for (int i = 0; i < writeStreams_.size(); i++) { + dataSize += computeStringSizeNoTag(writeStreams_.getRaw(i)); + } + size += dataSize; + size += 1 * getWriteStreamsList().size(); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest other = + (com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (!getWriteStreamsList().equals(other.getWriteStreamsList())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (getWriteStreamsCount() > 0) { + hash = (37 * hash) + WRITE_STREAMS_FIELD_NUMBER; + hash = (53 * hash) + getWriteStreamsList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest + prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+     * Request message for `BatchCommitWriteStreams`.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsRequest} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsRequest) + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest + .class, + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest + .Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + parent_ = ""; + + writeStreams_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest + build() { + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest + buildPartial() { + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest result = + new com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest( + this); + int from_bitField0_ = bitField0_; + result.parent_ = parent_; + if (((bitField0_ & 0x00000001) != 0)) { + writeStreams_ = writeStreams_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.writeStreams_ = writeStreams_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, 
java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest) + other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest + .getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + onChanged(); + } + if (!other.writeStreams_.isEmpty()) { + if (writeStreams_.isEmpty()) { + writeStreams_ = other.writeStreams_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureWriteStreamsIsMutable(); + writeStreams_.addAll(other.writeStreams_); + } + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest + parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int bitField0_; + + private java.lang.Object parent_ = ""; + /** + * + * + *
+       * Required. Parent table that all the streams should belong to, in the form of
+       * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+       * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+       * Required. Parent table that all the streams should belong to, in the form of
+       * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+       * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+       * Required. Parent table that all the streams should belong to, in the form of
+       * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+       * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + parent_ = value; + onChanged(); + return this; + } + /** + * + * + *
+       * Required. Parent table that all the streams should belong to, in the form of
+       * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+       * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearParent() { + + parent_ = getDefaultInstance().getParent(); + onChanged(); + return this; + } + /** + * + * + *
+       * Required. Parent table that all the streams should belong to, in the form of
+       * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+       * 
+ * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + parent_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.LazyStringList writeStreams_ = + com.google.protobuf.LazyStringArrayList.EMPTY; + + private void ensureWriteStreamsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + writeStreams_ = new com.google.protobuf.LazyStringArrayList(writeStreams_); + bitField0_ |= 0x00000001; + } + } + /** + * + * + *
+       * Required. The group of streams that will be committed atomically.
+       * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return A list containing the writeStreams. + */ + public com.google.protobuf.ProtocolStringList getWriteStreamsList() { + return writeStreams_.getUnmodifiableView(); + } + /** + * + * + *
+       * Required. The group of streams that will be committed atomically.
+       * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The count of writeStreams. + */ + public int getWriteStreamsCount() { + return writeStreams_.size(); + } + /** + * + * + *
+       * Required. The group of streams that will be committed atomically.
+       * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index of the element to return. + * @return The writeStreams at the given index. + */ + public java.lang.String getWriteStreams(int index) { + return writeStreams_.get(index); + } + /** + * + * + *
+       * Required. The group of streams that will be committed atomically.
+       * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes of the writeStreams to add. + */ + public com.google.protobuf.ByteString getWriteStreamsBytes(int index) { + return writeStreams_.getByteString(index); + } + /** + * + * + *
+       * Required. The group of streams that will be committed atomically.
+       * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param index The index to set the value at. + * @param value The writeStreams to set. + * @return This builder for chaining. + */ + public Builder setWriteStreams(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureWriteStreamsIsMutable(); + writeStreams_.set(index, value); + onChanged(); + return this; + } + /** + * + * + *
+       * Required. The group of streams that will be committed atomically.
+       * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The writeStreams to add. + * @return This builder for chaining. + */ + public Builder addWriteStreams(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureWriteStreamsIsMutable(); + writeStreams_.add(value); + onChanged(); + return this; + } + /** + * + * + *
+       * Required. The group of streams that will be committed atomically.
+       * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param values The writeStreams to add. + * @return This builder for chaining. + */ + public Builder addAllWriteStreams(java.lang.Iterable values) { + ensureWriteStreamsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, writeStreams_); + onChanged(); + return this; + } + /** + * + * + *
+       * Required. The group of streams that will be committed atomically.
+       * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearWriteStreams() { + writeStreams_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + *
+       * Required. The group of streams that will be committed atomically.
+       * 
+ * + * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes of the writeStreams to add. + * @return This builder for chaining. + */ + public Builder addWriteStreamsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureWriteStreamsIsMutable(); + writeStreams_.add(value); + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsRequest) + private static final com.google.cloud.bigquery.storage.v1alpha2.Storage + .BatchCommitWriteStreamsRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest(); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchCommitWriteStreamsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BatchCommitWriteStreamsRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface BatchCommitWriteStreamsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * The time at which streams were committed, with microsecond granularity.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return Whether the commitTime field is set. + */ + boolean hasCommitTime(); + /** + * + * + *
+     * The time at which streams were committed, with microsecond granularity.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return The commitTime. + */ + com.google.protobuf.Timestamp getCommitTime(); + /** + * + * + *
+     * The time at which streams were committed, with microsecond granularity.
+     * 
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder(); + } + /** + * + * + *
+   * Response message for `BatchCommitWriteStreams`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsResponse} + */ + public static final class BatchCommitWriteStreamsResponse + extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsResponse) + BatchCommitWriteStreamsResponseOrBuilder { + private static final long serialVersionUID = 0L; + // Use BatchCommitWriteStreamsResponse.newBuilder() to construct. + private BatchCommitWriteStreamsResponse( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private BatchCommitWriteStreamsResponse() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new BatchCommitWriteStreamsResponse(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private BatchCommitWriteStreamsResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.protobuf.Timestamp.Builder subBuilder = null; + if (commitTime_ != null) { + subBuilder = commitTime_.toBuilder(); + } + commitTime_ = + input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(commitTime_); + commitTime_ = subBuilder.buildPartial(); + } + + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse + .class, + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse + .Builder.class); + } + + public static final int COMMIT_TIME_FIELD_NUMBER = 1; + private com.google.protobuf.Timestamp commitTime_; + /** + * + * + *
 + * The time at which streams were committed, with microsecond granularity. + *
+ * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return Whether the commitTime field is set. + */ + public boolean hasCommitTime() { + return commitTime_ != null; + } + /** + * + * + *
 + * The time at which streams were committed, with microsecond granularity. + *
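 + * + *

Since {@code commit_time} is a message field, a sketch like the following + * (illustrative only, where {@code response} is a {@code BatchCommitWriteStreamsResponse} + * and only the standard {@code Timestamp} accessors are used) checks for presence + * before converting the value to epoch milliseconds: + * + *

{@code
+     * if (response.hasCommitTime()) {
+     *   com.google.protobuf.Timestamp ts = response.getCommitTime();
+     *   long commitMillis = ts.getSeconds() * 1000L + ts.getNanos() / 1_000_000;
+     * }
+     * }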
+ * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return The commitTime. + */ + public com.google.protobuf.Timestamp getCommitTime() { + return commitTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : commitTime_; + } + /** + * + * + *
 + * The time at which streams were committed, with microsecond granularity. + *
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + public com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder() { + return getCommitTime(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (commitTime_ != null) { + output.writeMessage(1, getCommitTime()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (commitTime_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getCommitTime()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse other = + (com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse) obj; + + if (hasCommitTime() != other.hasCommitTime()) return false; + if (hasCommitTime()) { + if (!getCommitTime().equals(other.getCommitTime())) return false; + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasCommitTime()) { + hash = (37 * hash) + COMMIT_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCommitTime().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse + prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+     * Response message for `BatchCommitWriteStreams`.
 + *
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsResponse} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsResponse) + com.google.cloud.bigquery.storage.v1alpha2.Storage + .BatchCommitWriteStreamsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse + .class, + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse + .Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + if (commitTimeBuilder_ == null) { + commitTime_ = null; + } else { + commitTime_ = null; + commitTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse + build() { + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse + buildPartial() { + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse result = + new com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse( + this); + if (commitTimeBuilder_ == null) { + result.commitTime_ = commitTime_; + } else { + result.commitTime_ = commitTimeBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + 
public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse) + other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse + other) { + if (other + == com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse + .getDefaultInstance()) return this; + if (other.hasCommitTime()) { + mergeCommitTime(other.getCommitTime()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse + parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private com.google.protobuf.Timestamp commitTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + commitTimeBuilder_; + /** + * + * + *
 + * The time at which streams were committed, with microsecond granularity. + *
+ * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return Whether the commitTime field is set. + */ + public boolean hasCommitTime() { + return commitTimeBuilder_ != null || commitTime_ != null; + } + /** + * + * + *
 + * The time at which streams were committed, with microsecond granularity. + *
+ * + * .google.protobuf.Timestamp commit_time = 1; + * + * @return The commitTime. + */ + public com.google.protobuf.Timestamp getCommitTime() { + if (commitTimeBuilder_ == null) { + return commitTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTime_; + } else { + return commitTimeBuilder_.getMessage(); + } + } + /** + * + * + *
 + * The time at which streams were committed, with microsecond granularity. + *
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + public Builder setCommitTime(com.google.protobuf.Timestamp value) { + if (commitTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commitTime_ = value; + onChanged(); + } else { + commitTimeBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
 + * The time at which streams were committed, with microsecond granularity. + *
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + public Builder setCommitTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (commitTimeBuilder_ == null) { + commitTime_ = builderForValue.build(); + onChanged(); + } else { + commitTimeBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
 + * The time at which streams were committed, with microsecond granularity. + *
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + public Builder mergeCommitTime(com.google.protobuf.Timestamp value) { + if (commitTimeBuilder_ == null) { + if (commitTime_ != null) { + commitTime_ = + com.google.protobuf.Timestamp.newBuilder(commitTime_) + .mergeFrom(value) + .buildPartial(); + } else { + commitTime_ = value; + } + onChanged(); + } else { + commitTimeBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
 + * The time at which streams were committed, with microsecond granularity. + *
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + public Builder clearCommitTime() { + if (commitTimeBuilder_ == null) { + commitTime_ = null; + onChanged(); + } else { + commitTime_ = null; + commitTimeBuilder_ = null; + } + + return this; + } + /** + * + * + *
 + * The time at which streams were committed, with microsecond granularity. + *
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + public com.google.protobuf.Timestamp.Builder getCommitTimeBuilder() { + + onChanged(); + return getCommitTimeFieldBuilder().getBuilder(); + } + /** + * + * + *
 + * The time at which streams were committed, with microsecond granularity. + *
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + public com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder() { + if (commitTimeBuilder_ != null) { + return commitTimeBuilder_.getMessageOrBuilder(); + } else { + return commitTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTime_; + } + } + /** + * + * + *
 + * The time at which streams were committed, with microsecond granularity. + *
+ * + * .google.protobuf.Timestamp commit_time = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getCommitTimeFieldBuilder() { + if (commitTimeBuilder_ == null) { + commitTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCommitTime(), getParentForChildren(), isClean()); + commitTime_ = null; + } + return commitTimeBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsResponse) + private static final com.google.cloud.bigquery.storage.v1alpha2.Storage + .BatchCommitWriteStreamsResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse(); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public BatchCommitWriteStreamsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new BatchCommitWriteStreamsResponse(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface FinalizeWriteStreamRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Required. Name of the stream to finalize, in the form of
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
 + *
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + java.lang.String getName(); + /** + * + * + *
+     * Required. Name of the stream to finalize, in the form of
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
 + *
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + } + /** + * + * + *
+   * Request message for invoking `FinalizeWriteStream`.
 + *
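 + * + *

A minimal sketch, not generated code: it assumes the generated v1alpha2 + * {@code BigQueryWriteClient} exposes a + * {@code finalizeWriteStream(FinalizeWriteStreamRequest)} method, that {@code client} + * is an open instance of it, and that the stream name below is a placeholder. + * + *

{@code
+   * FinalizeWriteStreamRequest request =
+   *     FinalizeWriteStreamRequest.newBuilder()
+   *         .setName(
+   *             "projects/my-project/datasets/my_dataset/tables/my_table/streams/my-stream")
+   *         .build();
+   * FinalizeWriteStreamResponse response = client.finalizeWriteStream(request);
+   * long finalizedRows = response.getRowCount();
+   * }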
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamRequest} + */ + public static final class FinalizeWriteStreamRequest + extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamRequest) + FinalizeWriteStreamRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use FinalizeWriteStreamRequest.newBuilder() to construct. + private FinalizeWriteStreamRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private FinalizeWriteStreamRequest() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new FinalizeWriteStreamRequest(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private FinalizeWriteStreamRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + name_ = s; + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest.class, + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest.Builder + .class); + } + + public static final int NAME_FIELD_NUMBER = 1; + private volatile java.lang.Object name_; + /** + * + * + *
+     * Required. Name of the stream to finalize, in the form of
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
 + *
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * + * + *
+     * Required. Name of the stream to finalize, in the form of
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
 + *
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!getNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest other = + (com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest + parseFrom(byte[] data) throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+     * Request message for invoking `FinalizeWriteStream`.
 + *
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamRequest} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamRequest) + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest.class, + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest + .Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + name_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest build() { + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest + buildPartial() { + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest result = + new com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest(this); + result.name_ = name_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder 
setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest) + other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest + .getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest + parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object name_ = ""; + /** + * + * + *
+       * Required. Name of the stream to finalize, in the form of
+       * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
 + *
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+       * Required. Name of the stream to finalize, in the form of
+       * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
 + *
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+       * Required. Name of the stream to finalize, in the form of
+       * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
 + *
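 + * + *

One way to assemble the documented resource name before passing it to this + * setter (illustrative only; the identifiers are placeholders): + * + *

{@code
+       * String streamName =
+       *     String.format(
+       *         "projects/%s/datasets/%s/tables/%s/streams/%s",
+       *         "my-project", "my_dataset", "my_table", "my-stream");
+       * FinalizeWriteStreamRequest.Builder builder =
+       *     FinalizeWriteStreamRequest.newBuilder().setName(streamName);
+       * }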
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + name_ = value; + onChanged(); + return this; + } + /** + * + * + *
+       * Required. Name of the stream to finalize, in the form of
+       * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
 + *
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + * + * + *
+       * Required. Name of the stream to finalize, in the form of
+       * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
 + *
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + name_ = value; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamRequest) + private static final com.google.cloud.bigquery.storage.v1alpha2.Storage + .FinalizeWriteStreamRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest(); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public FinalizeWriteStreamRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new FinalizeWriteStreamRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface FinalizeWriteStreamResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Number of rows in the finalized stream.
 + *
+ * + * int64 row_count = 1; + * + * @return The rowCount. + */ + long getRowCount(); + } + /** + * + * + *
+   * Response message for `FinalizeWriteStream`.
 + *
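 + * + *

An illustrative round trip over the wire format (not generated code; + * {@code response} stands for a message returned by a FinalizeWriteStream call): + * + *

{@code
+   * long rows = response.getRowCount();
+   * byte[] wire = response.toByteArray();
+   * // parseFrom throws InvalidProtocolBufferException if the bytes are malformed
+   * FinalizeWriteStreamResponse parsed = FinalizeWriteStreamResponse.parseFrom(wire);
+   * }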
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamResponse} + */ + public static final class FinalizeWriteStreamResponse + extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamResponse) + FinalizeWriteStreamResponseOrBuilder { + private static final long serialVersionUID = 0L; + // Use FinalizeWriteStreamResponse.newBuilder() to construct. + private FinalizeWriteStreamResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private FinalizeWriteStreamResponse() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new FinalizeWriteStreamResponse(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private FinalizeWriteStreamResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + rowCount_ = input.readInt64(); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse.class, + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse.Builder + .class); + } + + public static final int ROW_COUNT_FIELD_NUMBER = 1; + private long rowCount_; + /** + * + * + *
+     * Number of rows in the finalized stream.
 + *
+ * + * int64 row_count = 1; + * + * @return The rowCount. + */ + public long getRowCount() { + return rowCount_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (rowCount_ != 0L) { + output.writeInt64(1, rowCount_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (rowCount_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(1, rowCount_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse other = + (com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse) obj; + + if (getRowCount() != other.getRowCount()) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ROW_COUNT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getRowCount()); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse + parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse + parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse + parseFrom(com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse + parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse + parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse + parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse + parseFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse + parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse + parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse + parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+     * Response message for `FinalizeWriteStream`.
 + *
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamResponse} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamResponse) + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse + .class, + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse + .Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + rowCount_ = 0L; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage + .internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse + build() { + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse + buildPartial() { + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse result = + new com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse( + this); + result.rowCount_ = rowCount_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + 
@java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse) + other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse other) { + if (other + == com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse + .getDefaultInstance()) return this; + if (other.getRowCount() != 0L) { + setRowCount(other.getRowCount()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse + parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private long rowCount_; + /** + * + * + *
+       * Number of rows in the finalized stream.
 + *
+ * + * int64 row_count = 1; + * + * @return The rowCount. + */ + public long getRowCount() { + return rowCount_; + } + /** + * + * + *
+       * Number of rows in the finalized stream.
 + *
+ * + * int64 row_count = 1; + * + * @param value The rowCount to set. + * @return This builder for chaining. + */ + public Builder setRowCount(long value) { + + rowCount_ = value; + onChanged(); + return this; + } + /** + * + * + *
+       * Number of rows in the finalized stream.
 + *
+ * + * int64 row_count = 1; + * + * @return This builder for chaining. + */ + public Builder clearRowCount() { + + rowCount_ = 0L; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamResponse) + private static final com.google.cloud.bigquery.storage.v1alpha2.Storage + .FinalizeWriteStreamResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse(); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public FinalizeWriteStreamResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new FinalizeWriteStreamResponse(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha2_CreateWriteStreamRequest_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha2_CreateWriteStreamRequest_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_ProtoData_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_ProtoData_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsResponse_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsResponse_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha2_GetWriteStreamRequest_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + 
internal_static_google_cloud_bigquery_storage_v1alpha2_GetWriteStreamRequest_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsRequest_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsRequest_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsResponse_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsResponse_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamRequest_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamRequest_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamResponse_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamResponse_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n4google/cloud/bigquery/storage/v1alpha2" + + "/storage.proto\022&google.cloud.bigquery.st" + + "orage.v1alpha2\032\034google/api/annotations.p" + + "roto\032\027google/api/client.proto\032\037google/ap" + + "i/field_behavior.proto\0325google/cloud/big" + + "query/storage/v1alpha2/protobuf.proto\0323g" + + "oogle/cloud/bigquery/storage/v1alpha2/st" + + "ream.proto\032\033google/protobuf/empty.proto\032" + + "\037google/protobuf/timestamp.proto\032\036google" + + "/protobuf/wrappers.proto\032\027google/rpc/sta" + + "tus.proto\"\177\n\030CreateWriteStreamRequest\022\023\n" + + "\006parent\030\001 \001(\tB\003\340A\002\022N\n\014write_stream\030\002 \001(\013" + + "23.google.cloud.bigquery.storage.v1alpha" + + "2.WriteStreamB\003\340A\002\"\336\002\n\021AppendRowsRequest" + + "\022\031\n\014write_stream\030\001 \001(\tB\003\340A\002\0220\n\006offset\030\002 " + + "\001(\0132\033.google.protobuf.Int64ValueB\003\340A\001\022Y\n" + + "\nproto_rows\030\004 \001(\0132C.google.cloud.bigquer" + + "y.storage.v1alpha2.AppendRowsRequest.Pro" + + "toDataH\000\032\230\001\n\tProtoData\022J\n\rwriter_schema\030" + + "\001 \001(\01323.google.cloud.bigquery.storage.v1" + + "alpha2.ProtoSchema\022?\n\004rows\030\002 \001(\01321.googl" + + "e.cloud.bigquery.storage.v1alpha2.ProtoR" + + "owsB\006\n\004rows\"W\n\022AppendRowsResponse\022\020\n\006off" + + "set\030\001 \001(\003H\000\022#\n\005error\030\002 \001(\0132\022.google.rpc." 
+ + "StatusH\000B\n\n\010response\"*\n\025GetWriteStreamRe" + + "quest\022\021\n\004name\030\001 \001(\tB\003\340A\002\"Q\n\036BatchCommitW" + + "riteStreamsRequest\022\023\n\006parent\030\001 \001(\tB\003\340A\002\022" + + "\032\n\rwrite_streams\030\002 \003(\tB\003\340A\002\"R\n\037BatchComm" + + "itWriteStreamsResponse\022/\n\013commit_time\030\001 " + + "\001(\0132\032.google.protobuf.Timestamp\"/\n\032Final" + + "izeWriteStreamRequest\022\021\n\004name\030\001 \001(\tB\003\340A\002" + + "\"0\n\033FinalizeWriteStreamResponse\022\021\n\trow_c" + + "ount\030\001 \001(\0032\201\n\n\rBigQueryWrite\022\310\001\n\021CreateW" + + "riteStream\022@.google.cloud.bigquery.stora" + + "ge.v1alpha2.CreateWriteStreamRequest\0323.g" + + "oogle.cloud.bigquery.storage.v1alpha2.Wr" + + "iteStream\"<\202\323\344\223\0026\"1/v1alpha2/{parent=pro" + + "jects/*/datasets/*/tables/*}:\001*\022\325\001\n\nAppe" + + "ndRows\0229.google.cloud.bigquery.storage.v" + + "1alpha2.AppendRowsRequest\032:.google.cloud" + + ".bigquery.storage.v1alpha2.AppendRowsRes" + + "ponse\"L\202\323\344\223\002F\"A/v1alpha2/{write_stream=p" + + "rojects/*/datasets/*/tables/*/streams/*}" + + ":\001*(\0010\001\022\312\001\n\016GetWriteStream\022=.google.clou" + + "d.bigquery.storage.v1alpha2.GetWriteStre" + + "amRequest\0323.google.cloud.bigquery.storag" + + "e.v1alpha2.WriteStream\"D\202\323\344\223\002>\"9/v1alpha" + + "2/{name=projects/*/datasets/*/tables/*/s" + + "treams/*}:\001*\022\344\001\n\023FinalizeWriteStream\022B.g" + + "oogle.cloud.bigquery.storage.v1alpha2.Fi" + + "nalizeWriteStreamRequest\032C.google.cloud." + + "bigquery.storage.v1alpha2.FinalizeWriteS" + + "treamResponse\"D\202\323\344\223\002>\"9/v1alpha2/{name=p" + + "rojects/*/datasets/*/tables/*/streams/*}" + + ":\001*\022\345\001\n\027BatchCommitWriteStreams\022F.google" + + ".cloud.bigquery.storage.v1alpha2.BatchCo" + + "mmitWriteStreamsRequest\032G.google.cloud.b" + + "igquery.storage.v1alpha2.BatchCommitWrit" + + "eStreamsResponse\"9\202\323\344\223\0023\0221/v1alpha2/{par" + + "ent=projects/*/datasets/*/tables/*}\032\260\001\312A" + + "\036bigquerystorage.googleapis.com\322A\213\001https" + + "://www.googleapis.com/auth/bigquery,http" + + "s://www.googleapis.com/auth/bigquery.ins" + + "ertdata,https://www.googleapis.com/auth/" + + "cloud-platformB{\n*com.google.cloud.bigqu" + + "ery.storage.v1alpha2ZMgoogle.golang.org/" + + "genproto/googleapis/cloud/bigquery/stora" + + "ge/v1alpha2;storageb\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.AnnotationsProto.getDescriptor(), + com.google.api.ClientProto.getDescriptor(), + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1alpha2.Stream.getDescriptor(), + com.google.protobuf.EmptyProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + com.google.protobuf.WrappersProto.getDescriptor(), + com.google.rpc.StatusProto.getDescriptor(), + }); + internal_static_google_cloud_bigquery_storage_v1alpha2_CreateWriteStreamRequest_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1alpha2_CreateWriteStreamRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + 
internal_static_google_cloud_bigquery_storage_v1alpha2_CreateWriteStreamRequest_descriptor, + new java.lang.String[] { + "Parent", "WriteStream", + }); + internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_descriptor, + new java.lang.String[] { + "WriteStream", "Offset", "ProtoRows", "Rows", + }); + internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_ProtoData_descriptor = + internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_descriptor + .getNestedTypes() + .get(0); + internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_ProtoData_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_ProtoData_descriptor, + new java.lang.String[] { + "WriterSchema", "Rows", + }); + internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsResponse_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsResponse_descriptor, + new java.lang.String[] { + "Offset", "Error", "Response", + }); + internal_static_google_cloud_bigquery_storage_v1alpha2_GetWriteStreamRequest_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_google_cloud_bigquery_storage_v1alpha2_GetWriteStreamRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha2_GetWriteStreamRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsRequest_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsRequest_descriptor, + new java.lang.String[] { + "Parent", "WriteStreams", + }); + internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsResponse_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsResponse_descriptor, + new java.lang.String[] { + "CommitTime", + }); + internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamRequest_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamRequest_descriptor, + new java.lang.String[] { + "Name", + }); + internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamResponse_descriptor = + 
getDescriptor().getMessageTypes().get(7); + internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamResponse_descriptor, + new java.lang.String[] { + "RowCount", + }); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.ClientProto.defaultHost); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + registry.add(com.google.api.AnnotationsProto.http); + registry.add(com.google.api.ClientProto.oauthScopes); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + com.google.api.AnnotationsProto.getDescriptor(); + com.google.api.ClientProto.getDescriptor(); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1alpha2.Stream.getDescriptor(); + com.google.protobuf.EmptyProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + com.google.protobuf.WrappersProto.getDescriptor(); + com.google.rpc.StatusProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Stream.java b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Stream.java new file mode 100644 index 0000000000..944a94d456 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Stream.java @@ -0,0 +1,2361 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha2/stream.proto + +package com.google.cloud.bigquery.storage.v1alpha2; + +public final class Stream { + private Stream() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + public interface WriteStreamOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.WriteStream) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + java.lang.String getName(); + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * .google.cloud.bigquery.storage.v1alpha2.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The enum numeric value on the wire for type. + */ + int getTypeValue(); + /** + * + * .google.cloud.bigquery.storage.v1alpha2.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The type. + */ + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Type getType(); + + /** + * + * + *
+     * Output only. Create time of the stream.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + boolean hasCreateTime(); + /** + * + * + *
+     * Output only. Create time of the stream.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + com.google.protobuf.Timestamp getCreateTime(); + /** + * + * + *
+     * Output only. Create time of the stream.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder(); + + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, then it will have a commit_time same as
+     * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+     * means it is not committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the commitTime field is set. + */ + boolean hasCommitTime(); + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, then it will have a commit_time same as
+     * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+     * means it is not committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The commitTime. + */ + com.google.protobuf.Timestamp getCommitTime(); + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, then it will have a commit_time same as
+     * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+     * means it is not committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder(); + + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * `CreateWriteStream` response. Caller should generate data that's
+     * compatible with this schema to send in the initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the tableSchema field is set. + */ + boolean hasTableSchema(); + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * `CreateWriteStream` response. Caller should generate data that's
+     * compatible with this schema to send in the initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The tableSchema. + */ + com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema getTableSchema(); + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * `CreateWriteStream` response. Caller should generate data that's
+     * compatible with this schema to send in the initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchemaOrBuilder getTableSchemaOrBuilder(); + + /** + * + * + *
+     * Id set by the client to annotate its identity.
+     * 
+ * + * string external_id = 6; + * + * @return The externalId. + */ + java.lang.String getExternalId(); + /** + * + * + *
+     * Id set by the client to annotate its identity.
+     * 
+ * + * string external_id = 6; + * + * @return The bytes for externalId. + */ + com.google.protobuf.ByteString getExternalIdBytes(); + } + /** + * + * + *
+   * Information about a single stream that gets data inside the storage system.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.WriteStream} + */ + public static final class WriteStream extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.WriteStream) + WriteStreamOrBuilder { + private static final long serialVersionUID = 0L; + // Use WriteStream.newBuilder() to construct. + private WriteStream(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private WriteStream() { + name_ = ""; + type_ = 0; + externalId_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new WriteStream(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private WriteStream( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + name_ = s; + break; + } + case 16: + { + int rawValue = input.readEnum(); + + type_ = rawValue; + break; + } + case 26: + { + com.google.protobuf.Timestamp.Builder subBuilder = null; + if (createTime_ != null) { + subBuilder = createTime_.toBuilder(); + } + createTime_ = + input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(createTime_); + createTime_ = subBuilder.buildPartial(); + } + + break; + } + case 34: + { + com.google.protobuf.Timestamp.Builder subBuilder = null; + if (commitTime_ != null) { + subBuilder = commitTime_.toBuilder(); + } + commitTime_ = + input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(commitTime_); + commitTime_ = subBuilder.buildPartial(); + } + + break; + } + case 42: + { + com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.Builder subBuilder = + null; + if (tableSchema_ != null) { + subBuilder = tableSchema_.toBuilder(); + } + tableSchema_ = + input.readMessage( + com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableSchema_); + tableSchema_ = subBuilder.buildPartial(); + } + + break; + } + case 50: + { + java.lang.String s = input.readStringRequireUtf8(); + + externalId_ = s; + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.Stream + 
.internal_static_google_cloud_bigquery_storage_v1alpha2_WriteStream_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha2.Stream + .internal_static_google_cloud_bigquery_storage_v1alpha2_WriteStream_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.class, + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Builder.class); + } + + /** Protobuf enum {@code google.cloud.bigquery.storage.v1alpha2.WriteStream.Type} */ + public enum Type implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+       * Unknown type.
+       * 
+ * + * TYPE_UNSPECIFIED = 0; + */ + TYPE_UNSPECIFIED(0), + /** + * + * + *
+       * Data will commit automatically and appear as soon as the write is
+       * acknowledged.
+       * 
+ * + * COMMITTED = 1; + */ + COMMITTED(1), + /** + * + * + *
+       * Data is invisible until the stream is committed.
+       * 
+ * + * PENDING = 2; + */ + PENDING(2), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+       * Unknown type.
+       * 
+ * + * TYPE_UNSPECIFIED = 0; + */ + public static final int TYPE_UNSPECIFIED_VALUE = 0; + /** + * + * + *
+       * Data will commit automatically and appear as soon as the write is
+       * acknowledged.
+       * 
+ * + * COMMITTED = 1; + */ + public static final int COMMITTED_VALUE = 1; + /** + * + * + *
+       * Data is invisible until the stream is committed.
+       * 
+ * + * PENDING = 2; + */ + public static final int PENDING_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Type valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Type forNumber(int value) { + switch (value) { + case 0: + return TYPE_UNSPECIFIED; + case 1: + return COMMITTED; + case 2: + return PENDING; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Type findValueByNumber(int number) { + return Type.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final Type[] VALUES = values(); + + public static Type valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Type(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1alpha2.WriteStream.Type) + } + + public static final int NAME_FIELD_NUMBER = 1; + private volatile java.lang.Object name_; + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TYPE_FIELD_NUMBER = 2; + private int type_; + /** + * + * .google.cloud.bigquery.storage.v1alpha2.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The enum numeric value on the wire for type. + */ + public int getTypeValue() { + return type_; + } + /** + * + * .google.cloud.bigquery.storage.v1alpha2.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The type. + */ + public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Type getType() { + @SuppressWarnings("deprecation") + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Type result = + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Type.valueOf(type_); + return result == null + ? com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Type.UNRECOGNIZED + : result; + } + + public static final int CREATE_TIME_FIELD_NUMBER = 3; + private com.google.protobuf.Timestamp createTime_; + /** + * + * + *
+     * Output only. Create time of the stream.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + public boolean hasCreateTime() { + return createTime_ != null; + } + /** + * + * + *
+     * Output only. Create time of the stream.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + public com.google.protobuf.Timestamp getCreateTime() { + return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; + } + /** + * + * + *
+     * Output only. Create time of the stream.
+     * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + return getCreateTime(); + } + + public static final int COMMIT_TIME_FIELD_NUMBER = 4; + private com.google.protobuf.Timestamp commitTime_; + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, then it will have a commit_time same as
+     * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+     * means it is not committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the commitTime field is set. + */ + public boolean hasCommitTime() { + return commitTime_ != null; + } + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, then it will have a commit_time same as
+     * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+     * means it is not committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The commitTime. + */ + public com.google.protobuf.Timestamp getCommitTime() { + return commitTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : commitTime_; + } + /** + * + * + *
+     * Output only. Commit time of the stream.
+     * If a stream is of `COMMITTED` type, then it will have a commit_time same as
+     * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+     * means it is not committed.
+     * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder() { + return getCommitTime(); + } + + public static final int TABLE_SCHEMA_FIELD_NUMBER = 5; + private com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema tableSchema_; + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * `CreateWriteStream` response. Caller should generate data that's
+     * compatible with this schema to send in the initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the tableSchema field is set. + */ + public boolean hasTableSchema() { + return tableSchema_ != null; + } + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * `CreateWriteStream` response. Caller should generate data that's
+     * compatible with this schema to send in the initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The tableSchema. + */ + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema getTableSchema() { + return tableSchema_ == null + ? com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.getDefaultInstance() + : tableSchema_; + } + /** + * + * + *
+     * Output only. The schema of the destination table. It is only returned in
+     * `CreateWriteStream` response. Caller should generate data that's
+     * compatible with this schema to send in the initial `AppendRowsRequest`.
+     * The table schema could go out of date during the lifetime of the stream.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchemaOrBuilder + getTableSchemaOrBuilder() { + return getTableSchema(); + } + + public static final int EXTERNAL_ID_FIELD_NUMBER = 6; + private volatile java.lang.Object externalId_; + /** + * + * + *
+     * Id set by the client to annotate its identity.
+     * 
+ * + * string external_id = 6; + * + * @return The externalId. + */ + public java.lang.String getExternalId() { + java.lang.Object ref = externalId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + externalId_ = s; + return s; + } + } + /** + * + * + *
+     * Id set by the client to annotate its identity.
+     * 
+ * + * string external_id = 6; + * + * @return The bytes for externalId. + */ + public com.google.protobuf.ByteString getExternalIdBytes() { + java.lang.Object ref = externalId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + externalId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!getNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (type_ + != com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Type.TYPE_UNSPECIFIED + .getNumber()) { + output.writeEnum(2, type_); + } + if (createTime_ != null) { + output.writeMessage(3, getCreateTime()); + } + if (commitTime_ != null) { + output.writeMessage(4, getCommitTime()); + } + if (tableSchema_ != null) { + output.writeMessage(5, getTableSchema()); + } + if (!getExternalIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 6, externalId_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (type_ + != com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Type.TYPE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(2, type_); + } + if (createTime_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getCreateTime()); + } + if (commitTime_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getCommitTime()); + } + if (tableSchema_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getTableSchema()); + } + if (!getExternalIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, externalId_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream other = + (com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream) obj; + + if (!getName().equals(other.getName())) return false; + if (type_ != other.type_) return false; + if (hasCreateTime() != other.hasCreateTime()) return false; + if (hasCreateTime()) { + if (!getCreateTime().equals(other.getCreateTime())) return false; + } + if (hasCommitTime() != other.hasCommitTime()) return false; + if (hasCommitTime()) { + if (!getCommitTime().equals(other.getCommitTime())) return false; + } + if (hasTableSchema() != other.hasTableSchema()) return false; + if (hasTableSchema()) { + if (!getTableSchema().equals(other.getTableSchema())) return false; + } + if (!getExternalId().equals(other.getExternalId())) return false; + if 
(!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + type_; + if (hasCreateTime()) { + hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCreateTime().hashCode(); + } + if (hasCommitTime()) { + hash = (37 * hash) + COMMIT_TIME_FIELD_NUMBER; + hash = (53 * hash) + getCommitTime().hashCode(); + } + if (hasTableSchema()) { + hash = (37 * hash) + TABLE_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getTableSchema().hashCode(); + } + hash = (37 * hash) + EXTERNAL_ID_FIELD_NUMBER; + hash = (53 * hash) + getExternalId().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + 
throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+     * Information about a single stream that gets data inside the storage system.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.WriteStream} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.WriteStream) + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStreamOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.Stream + .internal_static_google_cloud_bigquery_storage_v1alpha2_WriteStream_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha2.Stream + .internal_static_google_cloud_bigquery_storage_v1alpha2_WriteStream_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.class, + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + name_ = ""; + + type_ = 0; + + if (createTimeBuilder_ == null) { + createTime_ = null; + } else { + createTime_ = null; + createTimeBuilder_ = null; + } + if (commitTimeBuilder_ == null) { + commitTime_ = null; + } else { + commitTime_ = null; + commitTimeBuilder_ = null; + } + if (tableSchemaBuilder_ == null) { + tableSchema_ = null; + } else { + tableSchema_ = null; + tableSchemaBuilder_ = null; + } + externalId_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha2.Stream + .internal_static_google_cloud_bigquery_storage_v1alpha2_WriteStream_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream build() { + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream buildPartial() { + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream result = + new com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream(this); + result.name_ = name_; + result.type_ = type_; + if (createTimeBuilder_ == null) { + result.createTime_ = createTime_; + } else { + result.createTime_ = createTimeBuilder_.build(); + } + if (commitTimeBuilder_ == null) { + result.commitTime_ = commitTime_; + } else { + result.commitTime_ = commitTimeBuilder_.build(); + } + if (tableSchemaBuilder_ == null) { + result.tableSchema_ = tableSchema_; + } else { + result.tableSchema_ = tableSchemaBuilder_.build(); + } + result.externalId_ = externalId_; + 
onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream) { + return mergeFrom((com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream other) { + if (other + == com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + onChanged(); + } + if (other.type_ != 0) { + setTypeValue(other.getTypeValue()); + } + if (other.hasCreateTime()) { + mergeCreateTime(other.getCreateTime()); + } + if (other.hasCommitTime()) { + mergeCommitTime(other.getCommitTime()); + } + if (other.hasTableSchema()) { + mergeTableSchema(other.getTableSchema()); + } + if (!other.getExternalId().isEmpty()) { + externalId_ = other.externalId_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object name_ = ""; + /** + * + * + *
+       * Output only. Name of the stream, in the form
+       * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
+       * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+       * Output only. Name of the stream, in the form
+       * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
+       * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+       * Output only. Name of the stream, in the form
+       * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
+       * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + name_ = value; + onChanged(); + return this; + } + /** + * + * + *
+       * Output only. Name of the stream, in the form
+       * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
+       * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + * + * + *
+       * Output only. Name of the stream, in the form
+       * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`.
+       * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + name_ = value; + onChanged(); + return this; + } + + private int type_ = 0; + /** + * + * .google.cloud.bigquery.storage.v1alpha2.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The enum numeric value on the wire for type. + */ + public int getTypeValue() { + return type_; + } + /** + * + * .google.cloud.bigquery.storage.v1alpha2.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @param value The enum numeric value on the wire for type to set. + * @return This builder for chaining. + */ + public Builder setTypeValue(int value) { + type_ = value; + onChanged(); + return this; + } + /** + * + * .google.cloud.bigquery.storage.v1alpha2.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The type. + */ + public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Type getType() { + @SuppressWarnings("deprecation") + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Type result = + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Type.valueOf(type_); + return result == null + ? com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Type.UNRECOGNIZED + : result; + } + /** + * + * .google.cloud.bigquery.storage.v1alpha2.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @param value The type to set. + * @return This builder for chaining. + */ + public Builder setType( + com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Type value) { + if (value == null) { + throw new NullPointerException(); + } + + type_ = value.getNumber(); + onChanged(); + return this; + } + /** + * + * .google.cloud.bigquery.storage.v1alpha2.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return This builder for chaining. + */ + public Builder clearType() { + + type_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp createTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + createTimeBuilder_; + /** + * + * + *
+       * Output only. Create time of the stream.
+       * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the createTime field is set. + */ + public boolean hasCreateTime() { + return createTimeBuilder_ != null || createTime_ != null; + } + /** + * + * + *
+       * Output only. Create time of the stream.
+       * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The createTime. + */ + public com.google.protobuf.Timestamp getCreateTime() { + if (createTimeBuilder_ == null) { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } else { + return createTimeBuilder_.getMessage(); + } + } + /** + * + * + *
+       * Output only. Create time of the stream.
+       * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + createTime_ = value; + onChanged(); + } else { + createTimeBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+       * Output only. Create time of the stream.
+       * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCreateTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (createTimeBuilder_ == null) { + createTime_ = builderForValue.build(); + onChanged(); + } else { + createTimeBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+       * Output only. Create time of the stream.
+       * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { + if (createTimeBuilder_ == null) { + if (createTime_ != null) { + createTime_ = + com.google.protobuf.Timestamp.newBuilder(createTime_) + .mergeFrom(value) + .buildPartial(); + } else { + createTime_ = value; + } + onChanged(); + } else { + createTimeBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+       * Output only. Create time of the stream.
+       * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCreateTime() { + if (createTimeBuilder_ == null) { + createTime_ = null; + onChanged(); + } else { + createTime_ = null; + createTimeBuilder_ = null; + } + + return this; + } + /** + * + * + *
+       * Output only. Create time of the stream.
+       * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getCreateTimeBuilder() { + + onChanged(); + return getCreateTimeFieldBuilder().getBuilder(); + } + /** + * + * + *
+       * Output only. Create time of the stream.
+       * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { + if (createTimeBuilder_ != null) { + return createTimeBuilder_.getMessageOrBuilder(); + } else { + return createTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : createTime_; + } + } + /** + * + * + *
+       * Output only. Create time of the stream.
+       * 
+ * + * + * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getCreateTimeFieldBuilder() { + if (createTimeBuilder_ == null) { + createTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCreateTime(), getParentForChildren(), isClean()); + createTime_ = null; + } + return createTimeBuilder_; + } + + private com.google.protobuf.Timestamp commitTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + commitTimeBuilder_; + /** + * + * + *
+       * Output only. Commit time of the stream.
+       * If a stream is of `COMMITTED` type, then it will have a commit_time same as
+       * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+       * means it is not committed.
+       * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the commitTime field is set. + */ + public boolean hasCommitTime() { + return commitTimeBuilder_ != null || commitTime_ != null; + } + /** + * + * + *
+       * Output only. Commit time of the stream.
+       * If a stream is of `COMMITTED` type, then it will have a commit_time same as
+       * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+       * means it is not committed.
+       * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The commitTime. + */ + public com.google.protobuf.Timestamp getCommitTime() { + if (commitTimeBuilder_ == null) { + return commitTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTime_; + } else { + return commitTimeBuilder_.getMessage(); + } + } + /** + * + * + *
+       * Output only. Commit time of the stream.
+       * If a stream is of `COMMITTED` type, then it will have a commit_time same as
+       * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+       * means it is not committed.
+       * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCommitTime(com.google.protobuf.Timestamp value) { + if (commitTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + commitTime_ = value; + onChanged(); + } else { + commitTimeBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+       * Output only. Commit time of the stream.
+       * If a stream is of `COMMITTED` type, then it will have a commit_time same as
+       * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+       * means it is not committed.
+       * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setCommitTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (commitTimeBuilder_ == null) { + commitTime_ = builderForValue.build(); + onChanged(); + } else { + commitTimeBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+       * Output only. Commit time of the stream.
+       * If a stream is of `COMMITTED` type, then it will have a commit_time same as
+       * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+       * means it is not committed.
+       * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeCommitTime(com.google.protobuf.Timestamp value) { + if (commitTimeBuilder_ == null) { + if (commitTime_ != null) { + commitTime_ = + com.google.protobuf.Timestamp.newBuilder(commitTime_) + .mergeFrom(value) + .buildPartial(); + } else { + commitTime_ = value; + } + onChanged(); + } else { + commitTimeBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+       * Output only. Commit time of the stream.
+       * If a stream is of `COMMITTED` type, then it will have a commit_time same as
+       * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+       * means it is not committed.
+       * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearCommitTime() { + if (commitTimeBuilder_ == null) { + commitTime_ = null; + onChanged(); + } else { + commitTime_ = null; + commitTimeBuilder_ = null; + } + + return this; + } + /** + * + * + *
+       * Output only. Commit time of the stream.
+       * If a stream is of `COMMITTED` type, then it will have a commit_time same as
+       * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+       * means it is not committed.
+       * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getCommitTimeBuilder() { + + onChanged(); + return getCommitTimeFieldBuilder().getBuilder(); + } + /** + * + * + *
+       * Output only. Commit time of the stream.
+       * If a stream is of `COMMITTED` type, then it will have a commit_time same as
+       * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+       * means it is not committed.
+       * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder() { + if (commitTimeBuilder_ != null) { + return commitTimeBuilder_.getMessageOrBuilder(); + } else { + return commitTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : commitTime_; + } + } + /** + * + * + *
+       * Output only. Commit time of the stream.
+       * If a stream is of `COMMITTED` type, then it will have a commit_time same as
+       * `create_time`. If the stream is of `PENDING` type, commit_time being empty
+       * means it is not committed.
+       * 
+ * + * + * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getCommitTimeFieldBuilder() { + if (commitTimeBuilder_ == null) { + commitTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getCommitTime(), getParentForChildren(), isClean()); + commitTime_ = null; + } + return commitTimeBuilder_; + } + + private com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema tableSchema_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema, + com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.Builder, + com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchemaOrBuilder> + tableSchemaBuilder_; + /** + * + * + *
+       * Output only. The schema of the destination table. It is only returned in
+       * `CreateWriteStream` response. Caller should generate data that's
+       * compatible with this schema to send in initial `AppendRowsRequest`.
+       * The table schema could go out of date during the life time of the stream.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the tableSchema field is set. + */ + public boolean hasTableSchema() { + return tableSchemaBuilder_ != null || tableSchema_ != null; + } + /** + * + * + *
+       * Output only. The schema of the destination table. It is only returned in
+       * `CreateWriteStream` response. Caller should generate data that's
+       * compatible with this schema to send in initial `AppendRowsRequest`.
+       * The table schema could go out of date during the life time of the stream.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The tableSchema. + */ + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema getTableSchema() { + if (tableSchemaBuilder_ == null) { + return tableSchema_ == null + ? com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.getDefaultInstance() + : tableSchema_; + } else { + return tableSchemaBuilder_.getMessage(); + } + } + /** + * + * + *
+       * Output only. The schema of the destination table. It is only returned in
+       * `CreateWriteStream` response. Caller should generate data that's
+       * compatible with this schema to send in initial `AppendRowsRequest`.
+       * The table schema could go out of date during the life time of the stream.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setTableSchema( + com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema value) { + if (tableSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableSchema_ = value; + onChanged(); + } else { + tableSchemaBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+       * Output only. The schema of the destination table. It is only returned in
+       * `CreateWriteStream` response. Caller should generate data that's
+       * compatible with this schema to send in initial `AppendRowsRequest`.
+       * The table schema could go out of date during the life time of the stream.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setTableSchema( + com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.Builder builderForValue) { + if (tableSchemaBuilder_ == null) { + tableSchema_ = builderForValue.build(); + onChanged(); + } else { + tableSchemaBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+       * Output only. The schema of the destination table. It is only returned in
+       * `CreateWriteStream` response. Caller should generate data that's
+       * compatible with this schema to send in initial `AppendRowsRequest`.
+       * The table schema could go out of date during the life time of the stream.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeTableSchema( + com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema value) { + if (tableSchemaBuilder_ == null) { + if (tableSchema_ != null) { + tableSchema_ = + com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.newBuilder( + tableSchema_) + .mergeFrom(value) + .buildPartial(); + } else { + tableSchema_ = value; + } + onChanged(); + } else { + tableSchemaBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+       * Output only. The schema of the destination table. It is only returned in
+       * `CreateWriteStream` response. Caller should generate data that's
+       * compatible with this schema to send in initial `AppendRowsRequest`.
+       * The table schema could go out of date during the life time of the stream.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearTableSchema() { + if (tableSchemaBuilder_ == null) { + tableSchema_ = null; + onChanged(); + } else { + tableSchema_ = null; + tableSchemaBuilder_ = null; + } + + return this; + } + /** + * + * + *
+       * Output only. The schema of the destination table. It is only returned in
+       * `CreateWriteStream` response. Caller should generate data that's
+       * compatible with this schema to send in initial `AppendRowsRequest`.
+       * The table schema could go out of date during the life time of the stream.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.Builder + getTableSchemaBuilder() { + + onChanged(); + return getTableSchemaFieldBuilder().getBuilder(); + } + /** + * + * + *
+       * Output only. The schema of the destination table. It is only returned in
+       * `CreateWriteStream` response. Caller should generate data that's
+       * compatible with this schema to send in initial `AppendRowsRequest`.
+       * The table schema could go out of date during the life time of the stream.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchemaOrBuilder + getTableSchemaOrBuilder() { + if (tableSchemaBuilder_ != null) { + return tableSchemaBuilder_.getMessageOrBuilder(); + } else { + return tableSchema_ == null + ? com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.getDefaultInstance() + : tableSchema_; + } + } + /** + * + * + *
+       * Output only. The schema of the destination table. It is only returned in
+       * `CreateWriteStream` response. Caller should generate data that's
+       * compatible with this schema to send in initial `AppendRowsRequest`.
+       * The table schema could go out of date during the life time of the stream.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema, + com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.Builder, + com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchemaOrBuilder> + getTableSchemaFieldBuilder() { + if (tableSchemaBuilder_ == null) { + tableSchemaBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema, + com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.Builder, + com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchemaOrBuilder>( + getTableSchema(), getParentForChildren(), isClean()); + tableSchema_ = null; + } + return tableSchemaBuilder_; + } + + private java.lang.Object externalId_ = ""; + /** + * + * + *
+       * Id set by client to annotate its identity.
+       * 
+ * + * string external_id = 6; + * + * @return The externalId. + */ + public java.lang.String getExternalId() { + java.lang.Object ref = externalId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + externalId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+       * Id set by client to annotate its identity.
+       * 
+ * + * string external_id = 6; + * + * @return The bytes for externalId. + */ + public com.google.protobuf.ByteString getExternalIdBytes() { + java.lang.Object ref = externalId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + externalId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+       * Id set by client to annotate its identity.
+       * 
+ * + * string external_id = 6; + * + * @param value The externalId to set. + * @return This builder for chaining. + */ + public Builder setExternalId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + externalId_ = value; + onChanged(); + return this; + } + /** + * + * + *
+       * Id set by client to annotate its identity.
+       * 
+ * + * string external_id = 6; + * + * @return This builder for chaining. + */ + public Builder clearExternalId() { + + externalId_ = getDefaultInstance().getExternalId(); + onChanged(); + return this; + } + /** + * + * + *
+       * Id set by client to annotate its identity.
+       * 
+ * + * string external_id = 6; + * + * @param value The bytes for externalId to set. + * @return This builder for chaining. + */ + public Builder setExternalIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + externalId_ = value; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.WriteStream) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.WriteStream) + private static final com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream(); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public WriteStream parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new WriteStream(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha2_WriteStream_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha2_WriteStream_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n3google/cloud/bigquery/storage/v1alpha2" + + "/stream.proto\022&google.cloud.bigquery.sto" + + "rage.v1alpha2\032\037google/api/field_behavior" + + ".proto\0322google/cloud/bigquery/storage/v1" + + "alpha2/table.proto\032\037google/protobuf/time" + + "stamp.proto\"\370\002\n\013WriteStream\022\021\n\004name\030\001 \001(" + + "\tB\003\340A\003\022K\n\004type\030\002 \001(\01628.google.cloud.bigq" + + "uery.storage.v1alpha2.WriteStream.TypeB\003" + + "\340A\005\0224\n\013create_time\030\003 \001(\0132\032.google.protob" + + "uf.TimestampB\003\340A\003\0224\n\013commit_time\030\004 \001(\0132\032" + + ".google.protobuf.TimestampB\003\340A\003\022N\n\014table" + + "_schema\030\005 \001(\01323.google.cloud.bigquery.st" + + "orage.v1alpha2.TableSchemaB\003\340A\003\022\023\n\013exter" + + "nal_id\030\006 \001(\t\"8\n\004Type\022\024\n\020TYPE_UNSPECIFIED" + + "\020\000\022\r\n\tCOMMITTED\020\001\022\013\n\007PENDING\020\002B{\n*com.go" + + 
"ogle.cloud.bigquery.storage.v1alpha2ZMgo" + + "ogle.golang.org/genproto/googleapis/clou" + + "d/bigquery/storage/v1alpha2;storageb\006pro" + + "to3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1alpha2.Table.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + }); + internal_static_google_cloud_bigquery_storage_v1alpha2_WriteStream_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1alpha2_WriteStream_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha2_WriteStream_descriptor, + new java.lang.String[] { + "Name", "Type", "CreateTime", "CommitTime", "TableSchema", "ExternalId", + }); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1alpha2.Table.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Table.java b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Table.java new file mode 100644 index 0000000000..0375a1acad --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Table.java @@ -0,0 +1,3527 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1alpha2/table.proto + +package com.google.cloud.bigquery.storage.v1alpha2; + +public final class Table { + private Table() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + public interface TableSchemaOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.TableSchema) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; + */ + java.util.List + getFieldsList(); + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; + */ + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema getFields(int index); + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; + */ + int getFieldsCount(); + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; + */ + java.util.List< + ? extends com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder> + getFieldsOrBuilderList(); + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; + */ + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder getFieldsOrBuilder( + int index); + } + /** + * + * + *
+   * Schema of a table
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.TableSchema} + */ + public static final class TableSchema extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.TableSchema) + TableSchemaOrBuilder { + private static final long serialVersionUID = 0L; + // Use TableSchema.newBuilder() to construct. + private TableSchema(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private TableSchema() { + fields_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new TableSchema(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private TableSchema( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + fields_ = + new java.util.ArrayList< + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema>(); + mutable_bitField0_ |= 0x00000001; + } + fields_.add( + input.readMessage( + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.parser(), + extensionRegistry)); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + fields_ = java.util.Collections.unmodifiableList(fields_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.Table + .internal_static_google_cloud_bigquery_storage_v1alpha2_TableSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha2.Table + .internal_static_google_cloud_bigquery_storage_v1alpha2_TableSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.class, + com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.Builder.class); + } + + public static final int FIELDS_FIELD_NUMBER = 1; + private java.util.List + fields_; + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; + */ + public java.util.List + getFieldsList() { + return fields_; + } + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; + */ + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder> + getFieldsOrBuilderList() { + return fields_; + } + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; + */ + public int getFieldsCount() { + return fields_.size(); + } + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; + */ + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema getFields(int index) { + return fields_.get(index); + } + /** + * + * + *
+     * Describes the fields in a table.
+     * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; + */ + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder + getFieldsOrBuilder(int index) { + return fields_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < fields_.size(); i++) { + output.writeMessage(1, fields_.get(i)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < fields_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, fields_.get(i)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema other = + (com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema) obj; + + if (!getFieldsList().equals(other.getFieldsList())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getFieldsCount() > 0) { + hash = (37 * hash) + FIELDS_FIELD_NUMBER; + hash = (53 * hash) + getFieldsList().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+     * Schema of a table
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.TableSchema} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.TableSchema) + com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchemaOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.Table + .internal_static_google_cloud_bigquery_storage_v1alpha2_TableSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha2.Table + .internal_static_google_cloud_bigquery_storage_v1alpha2_TableSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.class, + com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getFieldsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + if (fieldsBuilder_ == null) { + fields_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + fieldsBuilder_.clear(); + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha2.Table + .internal_static_google_cloud_bigquery_storage_v1alpha2_TableSchema_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema build() { + com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema buildPartial() { + com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema result = + new com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema(this); + int from_bitField0_ = bitField0_; + if (fieldsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + fields_ = java.util.Collections.unmodifiableList(fields_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.fields_ = fields_; + } else { + result.fields_ = fieldsBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + 
@java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema) { + return mergeFrom((com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema other) { + if (other + == com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.getDefaultInstance()) + return this; + if (fieldsBuilder_ == null) { + if (!other.fields_.isEmpty()) { + if (fields_.isEmpty()) { + fields_ = other.fields_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureFieldsIsMutable(); + fields_.addAll(other.fields_); + } + onChanged(); + } + } else { + if (!other.fields_.isEmpty()) { + if (fieldsBuilder_.isEmpty()) { + fieldsBuilder_.dispose(); + fieldsBuilder_ = null; + fields_ = other.fields_; + bitField0_ = (bitField0_ & ~0x00000001); + fieldsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getFieldsFieldBuilder() + : null; + } else { + fieldsBuilder_.addAllMessages(other.fields_); + } + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int bitField0_; + + private java.util.List + fields_ = java.util.Collections.emptyList(); + + private void ensureFieldsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + fields_ = + new java.util.ArrayList< + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema>(fields_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema, + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder, + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder> + fieldsBuilder_; + + /** + * + * + *
+       * Describes the fields in a table.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; + */ + public java.util.List + getFieldsList() { + if (fieldsBuilder_ == null) { + return java.util.Collections.unmodifiableList(fields_); + } else { + return fieldsBuilder_.getMessageList(); + } + } + /** + * + * + *
+       * Describes the fields in a table.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; + */ + public int getFieldsCount() { + if (fieldsBuilder_ == null) { + return fields_.size(); + } else { + return fieldsBuilder_.getCount(); + } + } + /** + * + * + *
+       * Describes the fields in a table.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; + */ + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema getFields( + int index) { + if (fieldsBuilder_ == null) { + return fields_.get(index); + } else { + return fieldsBuilder_.getMessage(index); + } + } + /** + * + * + *
+       * Describes the fields in a table.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; + */ + public Builder setFields( + int index, com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.set(index, value); + onChanged(); + } else { + fieldsBuilder_.setMessage(index, value); + } + return this; + } + /** + * + * + *
+       * Describes the fields in a table.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; + */ + public Builder setFields( + int index, + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder + builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.set(index, builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+       * Describes the fields in a table.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; + */ + public Builder addFields( + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.add(value); + onChanged(); + } else { + fieldsBuilder_.addMessage(value); + } + return this; + } + /** + * + * + *
+       * Describes the fields in a table.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; + */ + public Builder addFields( + int index, com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.add(index, value); + onChanged(); + } else { + fieldsBuilder_.addMessage(index, value); + } + return this; + } + /** + * + * + *
+       * Describes the fields in a table.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; + */ + public Builder addFields( + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder + builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * + * + *
+       * Describes the fields in a table.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; + */ + public Builder addFields( + int index, + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder + builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(index, builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+       * Describes the fields in a table.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; + */ + public Builder addAllFields( + java.lang.Iterable< + ? extends com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema> + values) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, fields_); + onChanged(); + } else { + fieldsBuilder_.addAllMessages(values); + } + return this; + } + /** + * + * + *
+       * Describes the fields in a table.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; + */ + public Builder clearFields() { + if (fieldsBuilder_ == null) { + fields_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + fieldsBuilder_.clear(); + } + return this; + } + /** + * + * + *
+       * Describes the fields in a table.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; + */ + public Builder removeFields(int index) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.remove(index); + onChanged(); + } else { + fieldsBuilder_.remove(index); + } + return this; + } + /** + * + * + *
+       * Describes the fields in a table.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; + */ + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder + getFieldsBuilder(int index) { + return getFieldsFieldBuilder().getBuilder(index); + } + /** + * + * + *
+       * Describes the fields in a table.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; + */ + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder + getFieldsOrBuilder(int index) { + if (fieldsBuilder_ == null) { + return fields_.get(index); + } else { + return fieldsBuilder_.getMessageOrBuilder(index); + } + } + /** + * + * + *
+       * Describes the fields in a table.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; + */ + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder> + getFieldsOrBuilderList() { + if (fieldsBuilder_ != null) { + return fieldsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(fields_); + } + } + /** + * + * + *
+       * Describes the fields in a table.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; + */ + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder + addFieldsBuilder() { + return getFieldsFieldBuilder() + .addBuilder( + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema + .getDefaultInstance()); + } + /** + * + * + *
+       * Describes the fields in a table.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; + */ + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder + addFieldsBuilder(int index) { + return getFieldsFieldBuilder() + .addBuilder( + index, + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema + .getDefaultInstance()); + } + /** + * + * + *
+       * Describes the fields in a table.
+       * 
+ * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; + */ + public java.util.List< + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder> + getFieldsBuilderList() { + return getFieldsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema, + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder, + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder> + getFieldsFieldBuilder() { + if (fieldsBuilder_ == null) { + fieldsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema, + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder, + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder>( + fields_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + fields_ = null; + } + return fieldsBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.TableSchema) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.TableSchema) + private static final com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema(); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public TableSchema parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TableSchema(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface TableFieldSchemaOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.TableFieldSchema) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Required. The field name. The name must contain only letters (a-z, A-Z),
+     * numbers (0-9), or underscores (_), and must start with a letter or
+     * underscore. The maximum length is 128 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + java.lang.String getName(); + /** + * + * + *
+     * Required. The field name. The name must contain only letters (a-z, A-Z),
+     * numbers (0-9), or underscores (_), and must start with a letter or
+     * underscore. The maximum length is 128 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+     * Required. The field data type.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The enum numeric value on the wire for type. + */ + int getTypeValue(); + /** + * + * + *
+     * Required. The field data type.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The type. + */ + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Type getType(); + + /** + * + * + *
+     * Optional. The field mode. The default value is NULLABLE.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for mode. + */ + int getModeValue(); + /** + * + * + *
+     * Optional. The field mode. The default value is NULLABLE.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The mode. + */ + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Mode getMode(); + + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List + getFieldsList(); + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema getFields(int index); + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + int getFieldsCount(); + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + java.util.List< + ? extends com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder> + getFieldsOrBuilderList(); + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder getFieldsOrBuilder( + int index); + + /** + * + * + *
+     * Optional. The field description. The maximum length is 1,024 characters.
+     * 
+ * + * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The description. + */ + java.lang.String getDescription(); + /** + * + * + *
+     * Optional. The field description. The maximum length is 1,024 characters.
+     * 
+ * + * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for description. + */ + com.google.protobuf.ByteString getDescriptionBytes(); + } + /** + * + * + *
+   * A field in TableSchema
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.TableFieldSchema} + */ + public static final class TableFieldSchema extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.TableFieldSchema) + TableFieldSchemaOrBuilder { + private static final long serialVersionUID = 0L; + // Use TableFieldSchema.newBuilder() to construct. + private TableFieldSchema(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private TableFieldSchema() { + name_ = ""; + type_ = 0; + mode_ = 0; + fields_ = java.util.Collections.emptyList(); + description_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new TableFieldSchema(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private TableFieldSchema( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + name_ = s; + break; + } + case 16: + { + int rawValue = input.readEnum(); + + type_ = rawValue; + break; + } + case 24: + { + int rawValue = input.readEnum(); + + mode_ = rawValue; + break; + } + case 34: + { + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + fields_ = + new java.util.ArrayList< + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema>(); + mutable_bitField0_ |= 0x00000001; + } + fields_.add( + input.readMessage( + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.parser(), + extensionRegistry)); + break; + } + case 50: + { + java.lang.String s = input.readStringRequireUtf8(); + + description_ = s; + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + fields_ = java.util.Collections.unmodifiableList(fields_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.Table + .internal_static_google_cloud_bigquery_storage_v1alpha2_TableFieldSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha2.Table + .internal_static_google_cloud_bigquery_storage_v1alpha2_TableFieldSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.class, + 
com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder.class); + } + + /** Protobuf enum {@code google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Type} */ + public enum Type implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+       * Illegal value
+       * 
+ * + * TYPE_UNSPECIFIED = 0; + */ + TYPE_UNSPECIFIED(0), + /** + * + * + *
+       * 64K, UTF8
+       * 
+ * + * STRING = 1; + */ + STRING(1), + /** + * + * + *
+       * 64-bit signed
+       * 
+ * + * INT64 = 2; + */ + INT64(2), + /** + * + * + *
+       * 64-bit IEEE floating point
+       * 
+ * + * DOUBLE = 3; + */ + DOUBLE(3), + /** + * + * + *
+       * Aggregate type
+       * 
+ * + * STRUCT = 4; + */ + STRUCT(4), + /** + * + * + *
+       * 64K, Binary
+       * 
+ * + * BYTES = 5; + */ + BYTES(5), + /** + * + * + *
+       * 2-valued
+       * 
+ * + * BOOL = 6; + */ + BOOL(6), + /** + * + * + *
+       * 64-bit signed usec since UTC epoch
+       * 
+ * + * TIMESTAMP = 7; + */ + TIMESTAMP(7), + /** + * + * + *
+       * Civil date - Year, Month, Day
+       * 
+ * + * DATE = 8; + */ + DATE(8), + /** + * + * + *
+       * Civil time - Hour, Minute, Second, Microseconds
+       * 
+ * + * TIME = 9; + */ + TIME(9), + /** + * + * + *
+       * Combination of civil date and civil time
+       * 
+ * + * DATETIME = 10; + */ + DATETIME(10), + /** + * + * + *
+       * Geography object (go/googlesql_geography)
+       * 
+ * + * GEOGRAPHY = 11; + */ + GEOGRAPHY(11), + /** + * + * + *
+       * Numeric value (go/googlesql_numeric)
+       * 
+ * + * NUMERIC = 12; + */ + NUMERIC(12), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+       * Illegal value
+       * 
+ * + * TYPE_UNSPECIFIED = 0; + */ + public static final int TYPE_UNSPECIFIED_VALUE = 0; + /** + * + * + *
+       * 64K, UTF8
+       * 
+ * + * STRING = 1; + */ + public static final int STRING_VALUE = 1; + /** + * + * + *
+       * 64-bit signed
+       * 
+ * + * INT64 = 2; + */ + public static final int INT64_VALUE = 2; + /** + * + * + *
+       * 64-bit IEEE floating point
+       * 
+ * + * DOUBLE = 3; + */ + public static final int DOUBLE_VALUE = 3; + /** + * + * + *
+       * Aggregate type
+       * 
+ * + * STRUCT = 4; + */ + public static final int STRUCT_VALUE = 4; + /** + * + * + *
+       * 64K, Binary
+       * 
+ * + * BYTES = 5; + */ + public static final int BYTES_VALUE = 5; + /** + * + * + *
+       * 2-valued
+       * 
+ * + * BOOL = 6; + */ + public static final int BOOL_VALUE = 6; + /** + * + * + *
+       * 64-bit signed usec since UTC epoch
+       * 
+ * + * TIMESTAMP = 7; + */ + public static final int TIMESTAMP_VALUE = 7; + /** + * + * + *
+       * Civil date - Year, Month, Day
+       * 
+ * + * DATE = 8; + */ + public static final int DATE_VALUE = 8; + /** + * + * + *
+       * Civil time - Hour, Minute, Second, Microseconds
+       * 
+ * + * TIME = 9; + */ + public static final int TIME_VALUE = 9; + /** + * + * + *
+       * Combination of civil date and civil time
+       * 
+ * + * DATETIME = 10; + */ + public static final int DATETIME_VALUE = 10; + /** + * + * + *
+       * Geography object (go/googlesql_geography)
+       * 
+ * + * GEOGRAPHY = 11; + */ + public static final int GEOGRAPHY_VALUE = 11; + /** + * + * + *
+       * Numeric value (go/googlesql_numeric)
+       * 
+ * + * NUMERIC = 12; + */ + public static final int NUMERIC_VALUE = 12; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Type valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Type forNumber(int value) { + switch (value) { + case 0: + return TYPE_UNSPECIFIED; + case 1: + return STRING; + case 2: + return INT64; + case 3: + return DOUBLE; + case 4: + return STRUCT; + case 5: + return BYTES; + case 6: + return BOOL; + case 7: + return TIMESTAMP; + case 8: + return DATE; + case 9: + return TIME; + case 10: + return DATETIME; + case 11: + return GEOGRAPHY; + case 12: + return NUMERIC; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Type findValueByNumber(int number) { + return Type.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final Type[] VALUES = values(); + + public static Type valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Type(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Type) + } + + /** Protobuf enum {@code google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Mode} */ + public enum Mode implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+       * Illegal value
+       * 
+ * + * MODE_UNSPECIFIED = 0; + */ + MODE_UNSPECIFIED(0), + /** NULLABLE = 1; */ + NULLABLE(1), + /** REQUIRED = 2; */ + REQUIRED(2), + /** REPEATED = 3; */ + REPEATED(3), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+       * Illegal value
+       * 
+ * + * MODE_UNSPECIFIED = 0; + */ + public static final int MODE_UNSPECIFIED_VALUE = 0; + /** NULLABLE = 1; */ + public static final int NULLABLE_VALUE = 1; + /** REQUIRED = 2; */ + public static final int REQUIRED_VALUE = 2; + /** REPEATED = 3; */ + public static final int REPEATED_VALUE = 3; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static Mode valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static Mode forNumber(int value) { + switch (value) { + case 0: + return MODE_UNSPECIFIED; + case 1: + return NULLABLE; + case 2: + return REQUIRED; + case 3: + return REPEATED; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Mode findValueByNumber(int number) { + return Mode.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.getDescriptor() + .getEnumTypes() + .get(1); + } + + private static final Mode[] VALUES = values(); + + public static Mode valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private Mode(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Mode) + } + + public static final int NAME_FIELD_NUMBER = 1; + private volatile java.lang.Object name_; + /** + * + * + *
+     * Required. The field name. The name must contain only letters (a-z, A-Z),
+     * numbers (0-9), or underscores (_), and must start with a letter or
+     * underscore. The maximum length is 128 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * + * + *
+     * Required. The field name. The name must contain only letters (a-z, A-Z),
+     * numbers (0-9), or underscores (_), and must start with a letter or
+     * underscore. The maximum length is 128 characters.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TYPE_FIELD_NUMBER = 2; + private int type_; + /** + * + * + *
+     * Required. The field data type.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The enum numeric value on the wire for type. + */ + public int getTypeValue() { + return type_; + } + /** + * + * + *
+     * Required. The field data type.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The type. + */ + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Type getType() { + @SuppressWarnings("deprecation") + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Type result = + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Type.valueOf(type_); + return result == null + ? com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Type.UNRECOGNIZED + : result; + } + + public static final int MODE_FIELD_NUMBER = 3; + private int mode_; + /** + * + * + *
+     * Optional. The field mode. The default value is NULLABLE.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for mode. + */ + public int getModeValue() { + return mode_; + } + /** + * + * + *
+     * Optional. The field mode. The default value is NULLABLE.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The mode. + */ + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Mode getMode() { + @SuppressWarnings("deprecation") + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Mode result = + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Mode.valueOf(mode_); + return result == null + ? com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Mode.UNRECOGNIZED + : result; + } + + public static final int FIELDS_FIELD_NUMBER = 4; + private java.util.List + fields_; + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getFieldsList() { + return fields_; + } + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder> + getFieldsOrBuilderList() { + return fields_; + } + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public int getFieldsCount() { + return fields_.size(); + } + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema getFields(int index) { + return fields_.get(index); + } + /** + * + * + *
+     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder + getFieldsOrBuilder(int index) { + return fields_.get(index); + } + + public static final int DESCRIPTION_FIELD_NUMBER = 6; + private volatile java.lang.Object description_; + /** + * + * + *
+     * Optional. The field description. The maximum length is 1,024 characters.
+     * 
+ * + * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The description. + */ + public java.lang.String getDescription() { + java.lang.Object ref = description_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + description_ = s; + return s; + } + } + /** + * + * + *
+     * Optional. The field description. The maximum length is 1,024 characters.
+     * 
+ * + * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for description. + */ + public com.google.protobuf.ByteString getDescriptionBytes() { + java.lang.Object ref = description_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + description_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!getNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (type_ + != com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Type.TYPE_UNSPECIFIED + .getNumber()) { + output.writeEnum(2, type_); + } + if (mode_ + != com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Mode.MODE_UNSPECIFIED + .getNumber()) { + output.writeEnum(3, mode_); + } + for (int i = 0; i < fields_.size(); i++) { + output.writeMessage(4, fields_.get(i)); + } + if (!getDescriptionBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 6, description_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (type_ + != com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Type.TYPE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(2, type_); + } + if (mode_ + != com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Mode.MODE_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(3, mode_); + } + for (int i = 0; i < fields_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, fields_.get(i)); + } + if (!getDescriptionBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, description_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema other = + (com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema) obj; + + if (!getName().equals(other.getName())) return false; + if (type_ != other.type_) return false; + if (mode_ != other.mode_) return false; + if (!getFieldsList().equals(other.getFieldsList())) return false; + if (!getDescription().equals(other.getDescription())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 
* hash) + getName().hashCode(); + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + type_; + hash = (37 * hash) + MODE_FIELD_NUMBER; + hash = (53 * hash) + mode_; + if (getFieldsCount() > 0) { + hash = (37 * hash) + FIELDS_FIELD_NUMBER; + hash = (53 * hash) + getFieldsList().hashCode(); + } + hash = (37 * hash) + DESCRIPTION_FIELD_NUMBER; + hash = (53 * hash) + getDescription().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static 
com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+     * A field in TableSchema
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.TableFieldSchema} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.TableFieldSchema) + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1alpha2.Table + .internal_static_google_cloud_bigquery_storage_v1alpha2_TableFieldSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1alpha2.Table + .internal_static_google_cloud_bigquery_storage_v1alpha2_TableFieldSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.class, + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getFieldsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + name_ = ""; + + type_ = 0; + + mode_ = 0; + + if (fieldsBuilder_ == null) { + fields_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + fieldsBuilder_.clear(); + } + description_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1alpha2.Table + .internal_static_google_cloud_bigquery_storage_v1alpha2_TableFieldSchema_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema build() { + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema buildPartial() { + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema result = + new com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema(this); + int from_bitField0_ = bitField0_; + result.name_ = name_; + result.type_ = type_; + result.mode_ = mode_; + if (fieldsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + fields_ = java.util.Collections.unmodifiableList(fields_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.fields_ = fields_; + } else { + result.fields_ = fieldsBuilder_.build(); + } + result.description_ = description_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + 
com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema other) { + if (other + == com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema + .getDefaultInstance()) return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + onChanged(); + } + if (other.type_ != 0) { + setTypeValue(other.getTypeValue()); + } + if (other.mode_ != 0) { + setModeValue(other.getModeValue()); + } + if (fieldsBuilder_ == null) { + if (!other.fields_.isEmpty()) { + if (fields_.isEmpty()) { + fields_ = other.fields_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureFieldsIsMutable(); + fields_.addAll(other.fields_); + } + onChanged(); + } + } else { + if (!other.fields_.isEmpty()) { + if (fieldsBuilder_.isEmpty()) { + fieldsBuilder_.dispose(); + fieldsBuilder_ = null; + fields_ = other.fields_; + bitField0_ = (bitField0_ & ~0x00000001); + fieldsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? getFieldsFieldBuilder() + : null; + } else { + fieldsBuilder_.addAllMessages(other.fields_); + } + } + } + if (!other.getDescription().isEmpty()) { + description_ = other.description_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + /** + * + * + *
+       * Required. The field name. The name must contain only letters (a-z, A-Z),
+       * numbers (0-9), or underscores (_), and must start with a letter or
+       * underscore. The maximum length is 128 characters.
+       * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+       * Required. The field name. The name must contain only letters (a-z, A-Z),
+       * numbers (0-9), or underscores (_), and must start with a letter or
+       * underscore. The maximum length is 128 characters.
+       * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+       * Required. The field name. The name must contain only letters (a-z, A-Z),
+       * numbers (0-9), or underscores (_), and must start with a letter or
+       * underscore. The maximum length is 128 characters.
+       * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + name_ = value; + onChanged(); + return this; + } + /** + * + * + *
+       * Required. The field name. The name must contain only letters (a-z, A-Z),
+       * numbers (0-9), or underscores (_), and must start with a letter or
+       * underscore. The maximum length is 128 characters.
+       * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + * + * + *
+       * Required. The field name. The name must contain only letters (a-z, A-Z),
+       * numbers (0-9), or underscores (_), and must start with a letter or
+       * underscore. The maximum length is 128 characters.
+       * 
+ * + * string name = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + name_ = value; + onChanged(); + return this; + } + + private int type_ = 0; + /** + * + * + *
+       * Required. The field data type.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The enum numeric value on the wire for type. + */ + public int getTypeValue() { + return type_; + } + /** + * + * + *
+       * Required. The field data type.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @param value The enum numeric value on the wire for type to set. + * @return This builder for chaining. + */ + public Builder setTypeValue(int value) { + type_ = value; + onChanged(); + return this; + } + /** + * + * + *
+       * Required. The field data type.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The type. + */ + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Type getType() { + @SuppressWarnings("deprecation") + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Type result = + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Type.valueOf(type_); + return result == null + ? com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Type.UNRECOGNIZED + : result; + } + /** + * + * + *
+       * Required. The field data type.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @param value The type to set. + * @return This builder for chaining. + */ + public Builder setType( + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Type value) { + if (value == null) { + throw new NullPointerException(); + } + + type_ = value.getNumber(); + onChanged(); + return this; + } + /** + * + * + *
+       * Required. The field data type.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return This builder for chaining. + */ + public Builder clearType() { + + type_ = 0; + onChanged(); + return this; + } + + private int mode_ = 0; + /** + * + * + *
+       * Optional. The field mode. The default value is NULLABLE.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The enum numeric value on the wire for mode. + */ + public int getModeValue() { + return mode_; + } + /** + * + * + *
+       * Optional. The field mode. The default value is NULLABLE.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The enum numeric value on the wire for mode to set. + * @return This builder for chaining. + */ + public Builder setModeValue(int value) { + mode_ = value; + onChanged(); + return this; + } + /** + * + * + *
+       * Optional. The field mode. The default value is NULLABLE.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The mode. + */ + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Mode getMode() { + @SuppressWarnings("deprecation") + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Mode result = + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Mode.valueOf(mode_); + return result == null + ? com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Mode.UNRECOGNIZED + : result; + } + /** + * + * + *
+       * Optional. The field mode. The default value is NULLABLE.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @param value The mode to set. + * @return This builder for chaining. + */ + public Builder setMode( + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Mode value) { + if (value == null) { + throw new NullPointerException(); + } + + mode_ = value.getNumber(); + onChanged(); + return this; + } + /** + * + * + *
+       * Optional. The field mode. The default value is NULLABLE.
+       * 
+ * + * + * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return This builder for chaining. + */ + public Builder clearMode() { + + mode_ = 0; + onChanged(); + return this; + } + + private java.util.List + fields_ = java.util.Collections.emptyList(); + + private void ensureFieldsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + fields_ = + new java.util.ArrayList< + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema>(fields_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema, + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder, + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder> + fieldsBuilder_; + + /** + * + * + *
+       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+       * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List + getFieldsList() { + if (fieldsBuilder_ == null) { + return java.util.Collections.unmodifiableList(fields_); + } else { + return fieldsBuilder_.getMessageList(); + } + } + /** + * + * + *
+       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+       * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public int getFieldsCount() { + if (fieldsBuilder_ == null) { + return fields_.size(); + } else { + return fieldsBuilder_.getCount(); + } + } + /** + * + * + *
+       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+       * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema getFields( + int index) { + if (fieldsBuilder_ == null) { + return fields_.get(index); + } else { + return fieldsBuilder_.getMessage(index); + } + } + /** + * + * + *
+       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+       * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setFields( + int index, com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.set(index, value); + onChanged(); + } else { + fieldsBuilder_.setMessage(index, value); + } + return this; + } + /** + * + * + *
+       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+       * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setFields( + int index, + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder + builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.set(index, builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+       * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addFields( + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.add(value); + onChanged(); + } else { + fieldsBuilder_.addMessage(value); + } + return this; + } + /** + * + * + *
+       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+       * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addFields( + int index, com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema value) { + if (fieldsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureFieldsIsMutable(); + fields_.add(index, value); + onChanged(); + } else { + fieldsBuilder_.addMessage(index, value); + } + return this; + } + /** + * + * + *
+       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+       * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addFields( + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder + builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * + * + *
+       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+       * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addFields( + int index, + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder + builderForValue) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.add(index, builderForValue.build()); + onChanged(); + } else { + fieldsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+       * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder addAllFields( + java.lang.Iterable< + ? extends com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema> + values) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, fields_); + onChanged(); + } else { + fieldsBuilder_.addAllMessages(values); + } + return this; + } + /** + * + * + *
+       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+       * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearFields() { + if (fieldsBuilder_ == null) { + fields_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + fieldsBuilder_.clear(); + } + return this; + } + /** + * + * + *
+       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+       * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder removeFields(int index) { + if (fieldsBuilder_ == null) { + ensureFieldsIsMutable(); + fields_.remove(index); + onChanged(); + } else { + fieldsBuilder_.remove(index); + } + return this; + } + /** + * + * + *
+       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+       * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder + getFieldsBuilder(int index) { + return getFieldsFieldBuilder().getBuilder(index); + } + /** + * + * + *
+       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+       * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder + getFieldsOrBuilder(int index) { + if (fieldsBuilder_ == null) { + return fields_.get(index); + } else { + return fieldsBuilder_.getMessageOrBuilder(index); + } + } + /** + * + * + *
+       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+       * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List< + ? extends com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder> + getFieldsOrBuilderList() { + if (fieldsBuilder_ != null) { + return fieldsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(fields_); + } + } + /** + * + * + *
+       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+       * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder + addFieldsBuilder() { + return getFieldsFieldBuilder() + .addBuilder( + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema + .getDefaultInstance()); + } + /** + * + * + *
+       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+       * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder + addFieldsBuilder(int index) { + return getFieldsFieldBuilder() + .addBuilder( + index, + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema + .getDefaultInstance()); + } + /** + * + * + *
+       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
+       * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public java.util.List< + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder> + getFieldsBuilderList() { + return getFieldsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema, + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder, + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder> + getFieldsFieldBuilder() { + if (fieldsBuilder_ == null) { + fieldsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema, + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder, + com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder>( + fields_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + fields_ = null; + } + return fieldsBuilder_; + } + + private java.lang.Object description_ = ""; + /** + * + * + *
+       * Optional. The field description. The maximum length is 1,024 characters.
+       * 
+ * + * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The description. + */ + public java.lang.String getDescription() { + java.lang.Object ref = description_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + description_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+       * Optional. The field description. The maximum length is 1,024 characters.
+       * 
+ * + * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The bytes for description. + */ + public com.google.protobuf.ByteString getDescriptionBytes() { + java.lang.Object ref = description_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + description_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+       * Optional. The field description. The maximum length is 1,024 characters.
+       * 
+ * + * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The description to set. + * @return This builder for chaining. + */ + public Builder setDescription(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + description_ = value; + onChanged(); + return this; + } + /** + * + * + *
+       * Optional. The field description. The maximum length is 1,024 characters.
+       * 
+ * + * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearDescription() { + + description_ = getDefaultInstance().getDescription(); + onChanged(); + return this; + } + /** + * + * + *
+       * Optional. The field description. The maximum length is 1,024 characters.
+       * 
+ * + * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The bytes for description to set. + * @return This builder for chaining. + */ + public Builder setDescriptionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + description_ = value; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.TableFieldSchema) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.TableFieldSchema) + private static final com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema(); + } + + public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public TableFieldSchema parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TableFieldSchema(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha2_TableSchema_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha2_TableSchema_fieldAccessorTable; + private static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1alpha2_TableFieldSchema_descriptor; + private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1alpha2_TableFieldSchema_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n2google/cloud/bigquery/storage/v1alpha2" + + "/table.proto\022&google.cloud.bigquery.stor" + + "age.v1alpha2\032\037google/api/field_behavior." + + "proto\"W\n\013TableSchema\022H\n\006fields\030\001 \003(\01328.g" + + "oogle.cloud.bigquery.storage.v1alpha2.Ta" + + "bleFieldSchema\"\252\004\n\020TableFieldSchema\022\021\n\004n" + + "ame\030\001 \001(\tB\003\340A\002\022P\n\004type\030\002 \001(\0162=.google.cl" + + "oud.bigquery.storage.v1alpha2.TableField" + + "Schema.TypeB\003\340A\002\022P\n\004mode\030\003 \001(\0162=.google." 
+ + "cloud.bigquery.storage.v1alpha2.TableFie" + + "ldSchema.ModeB\003\340A\001\022M\n\006fields\030\004 \003(\01328.goo" + + "gle.cloud.bigquery.storage.v1alpha2.Tabl" + + "eFieldSchemaB\003\340A\001\022\030\n\013description\030\006 \001(\tB\003" + + "\340A\001\"\255\001\n\004Type\022\024\n\020TYPE_UNSPECIFIED\020\000\022\n\n\006ST" + + "RING\020\001\022\t\n\005INT64\020\002\022\n\n\006DOUBLE\020\003\022\n\n\006STRUCT\020" + + "\004\022\t\n\005BYTES\020\005\022\010\n\004BOOL\020\006\022\r\n\tTIMESTAMP\020\007\022\010\n" + + "\004DATE\020\010\022\010\n\004TIME\020\t\022\014\n\010DATETIME\020\n\022\r\n\tGEOGR" + + "APHY\020\013\022\013\n\007NUMERIC\020\014\"F\n\004Mode\022\024\n\020MODE_UNSP" + + "ECIFIED\020\000\022\014\n\010NULLABLE\020\001\022\014\n\010REQUIRED\020\002\022\014\n" + + "\010REPEATED\020\003B{\n*com.google.cloud.bigquery" + + ".storage.v1alpha2ZMgoogle.golang.org/gen" + + "proto/googleapis/cloud/bigquery/storage/" + + "v1alpha2;storageb\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.FieldBehaviorProto.getDescriptor(), + }); + internal_static_google_cloud_bigquery_storage_v1alpha2_TableSchema_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1alpha2_TableSchema_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha2_TableSchema_descriptor, + new java.lang.String[] { + "Fields", + }); + internal_static_google_cloud_bigquery_storage_v1alpha2_TableFieldSchema_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_bigquery_storage_v1alpha2_TableFieldSchema_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1alpha2_TableFieldSchema_descriptor, + new java.lang.String[] { + "Name", "Type", "Mode", "Fields", "Description", + }); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + com.google.api.FieldBehaviorProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/protobuf.proto b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/protobuf.proto new file mode 100644 index 0000000000..4f58b0d932 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/protobuf.proto @@ -0,0 +1,44 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1alpha2; + +import "google/protobuf/descriptor.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1alpha2;storage"; +option java_outer_classname = "ProtoBufProto"; +option java_package = "com.google.cloud.bigquery.storage.v1alpha2"; + +// Protobuf schema is an API representation of the proto buffer schema. +message ProtoSchema { + // Message descriptor for the data. The descriptor has to be self-contained + // to include all the nested type definitions, except for proto buffer well + // known types + // (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf) + // and zetasql public protos + // (https://github.com/google/zetasql/tree/master/zetasql/public/proto). + google.protobuf.DescriptorProto proto_descriptor = 1; +} + +// Protobuf rows. +message ProtoRows { + // A sequence of rows serialized as a Protocol Buffer. + // + // See https://developers.google.com/protocol-buffers/docs/overview for more + // information on deserializing this field. + repeated bytes serialized_rows = 1; +} diff --git a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/storage.proto b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/storage.proto new file mode 100644 index 0000000000..e93306371d --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/storage.proto @@ -0,0 +1,191 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1alpha2; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/cloud/bigquery/storage/v1alpha2/protobuf.proto"; +import "google/cloud/bigquery/storage/v1alpha2/stream.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; +import "google/rpc/status.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1alpha2;storage"; +option java_package = "com.google.cloud.bigquery.storage.v1alpha2"; + +// Request message for `CreateWriteStream`. +message CreateWriteStreamRequest { + // Required. Reference to the table to which the stream belongs, in the format + // of `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`. + string parent = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Stream to be created. + WriteStream write_stream = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// Request message for `AppendRows`. +message AppendRowsRequest { + message ProtoData { + // Proto schema used to serialize the data. + ProtoSchema writer_schema = 1; + + // Serialized row data in protobuf message format. + ProtoRows rows = 2; + } + + // Required.
The stream that is the target of the append operation. This value must be + // specified for the initial request. If subsequent requests specify the + // stream name, it must be equal to the value provided in the first request. + string write_stream = 1 [(google.api.field_behavior) = REQUIRED]; + + // Optional. If present, the write is only performed if the next append offset is the same + // as the provided value. If not present, the write is performed at the + // current end of stream. + google.protobuf.Int64Value offset = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Input rows. The `writer_schema` field must be specified in the initial + // request and, currently, it will be ignored if specified in following + // requests. Following requests must have data in the same format as the + // initial request. + oneof rows { + ProtoData proto_rows = 4; + } +} + +// Response message for `AppendRows`. +message AppendRowsResponse { + oneof response { + // The row offset at which the last append occurred. + int64 offset = 1; + + // Error in case of append failure. If set, it means rows are not accepted + // into the system. Users can retry within the same connection. + google.rpc.Status error = 2; + } +} + +// Request message for `GetWriteStream`. +message GetWriteStreamRequest { + // Required. Name of the stream to get, in the form of + // `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`. + string name = 1 [(google.api.field_behavior) = REQUIRED]; +} + +// Request message for `BatchCommitWriteStreams`. +message BatchCommitWriteStreamsRequest { + // Required. Parent table that all the streams should belong to, in the form of + // `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`. + string parent = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The group of streams that will be committed atomically. + repeated string write_streams = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// Response message for `BatchCommitWriteStreams`. +message BatchCommitWriteStreamsResponse { + // The time at which streams were committed, with microsecond granularity. + google.protobuf.Timestamp commit_time = 1; +} + +// Request message for invoking `FinalizeWriteStream`. +message FinalizeWriteStreamRequest { + // Required. Name of the stream to finalize, in the form of + // `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`. + string name = 1 [(google.api.field_behavior) = REQUIRED]; +} + +// Response message for `FinalizeWriteStream`. +message FinalizeWriteStreamResponse { + // Number of rows in the finalized stream. + int64 row_count = 1; +} + +// BigQuery Write API. +// +// The Write API can be used to write data to BigQuery. +service BigQueryWrite { + option (google.api.default_host) = "bigquerystorage.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigquery," + "https://www.googleapis.com/auth/bigquery.insertdata," + "https://www.googleapis.com/auth/cloud-platform"; + + // Creates a write stream to the given table. + rpc CreateWriteStream(CreateWriteStreamRequest) returns (WriteStream) { + option (google.api.http) = { + post: "/v1alpha2/{parent=projects/*/datasets/*/tables/*}" + body: "*" + }; + } + + // Appends data to the given stream. + // + // If `offset` is specified, the `offset` is checked against the end of + // stream.
The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an + // attempt is made to append to an offset beyond the current end of the stream + // or `ALREADY_EXISTS` if the user provides an `offset` that has already been + // written to. Users can retry with an adjusted offset within the same RPC + // stream. If `offset` is not specified, append happens at the end of the + // stream. + // + // The response contains the offset at which the append happened. Responses + // are received in the same order in which requests are sent. There will be + // one response for each successful request. If the `offset` is not set in the + // response, it means the append did not happen due to an error. If one request + // fails, all the subsequent requests will also fail until a successful request + // is made again. + // + // If the stream is of `PENDING` type, data will only be available for read + // operations after the stream is committed. + rpc AppendRows(stream AppendRowsRequest) returns (stream AppendRowsResponse) { + option (google.api.http) = { + post: "/v1alpha2/{write_stream=projects/*/datasets/*/tables/*/streams/*}" + body: "*" + }; + } + + // Gets a write stream. + rpc GetWriteStream(GetWriteStreamRequest) returns (WriteStream) { + option (google.api.http) = { + post: "/v1alpha2/{name=projects/*/datasets/*/tables/*/streams/*}" + body: "*" + }; + } + + // Finalizes a write stream so that no new data can be appended to the + // stream. + rpc FinalizeWriteStream(FinalizeWriteStreamRequest) returns (FinalizeWriteStreamResponse) { + option (google.api.http) = { + post: "/v1alpha2/{name=projects/*/datasets/*/tables/*/streams/*}" + body: "*" + }; + } + + // Atomically commits a group of `PENDING` streams that belong to the same + // `parent` table. + // Streams must be finalized before commit and cannot be committed multiple + // times. Once a stream is committed, data in the stream becomes available + // for read operations. + rpc BatchCommitWriteStreams(BatchCommitWriteStreamsRequest) returns (BatchCommitWriteStreamsResponse) { + option (google.api.http) = { + get: "/v1alpha2/{parent=projects/*/datasets/*/tables/*}" + }; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/stream.proto b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/stream.proto new file mode 100644 index 0000000000..a50822d1ac --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/stream.proto @@ -0,0 +1,64 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+// + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1alpha2; + +import "google/api/field_behavior.proto"; +import "google/cloud/bigquery/storage/v1alpha2/table.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1alpha2;storage"; +option java_package = "com.google.cloud.bigquery.storage.v1alpha2"; + +// Information about a single stream that carries data into the storage system. +message WriteStream { + enum Type { + // Unknown type. + TYPE_UNSPECIFIED = 0; + + // Data will commit automatically and appear as soon as the write is + // acknowledged. + COMMITTED = 1; + + // Data is invisible until the stream is committed. + PENDING = 2; + } + + // Output only. Name of the stream, in the form + // `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}/streams/{stream_id}`. + string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + Type type = 2 [(google.api.field_behavior) = IMMUTABLE]; + + // Output only. Create time of the stream. + google.protobuf.Timestamp create_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Commit time of the stream. + // If a stream is of `COMMITTED` type, then it will have a commit_time the same as + // `create_time`. If the stream is of `PENDING` type, commit_time being empty + // means it is not committed. + google.protobuf.Timestamp commit_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The schema of the destination table. It is only returned in the + // `CreateWriteStream` response. The caller should generate data that is + // compatible with this schema to send in the initial `AppendRowsRequest`. + // The table schema could go out of date during the lifetime of the stream. + TableSchema table_schema = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // ID set by the client to annotate its identity. + string external_id = 6; +} diff --git a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/table.proto b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/table.proto new file mode 100644 index 0000000000..fb1dc4aacd --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/table.proto @@ -0,0 +1,101 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1alpha2; + +import "google/api/field_behavior.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1alpha2;storage"; +option java_package = "com.google.cloud.bigquery.storage.v1alpha2"; + +// Schema of a table +message TableSchema { + // Describes the fields in a table.
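+  // For example (illustrative only), a table with a required STRING column `name` and a + // nullable INT64 column `age` would be described by two TableFieldSchema entries in + // this list, one per top-level column; nested columns go in each entry's own `fields`.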
+ repeated TableFieldSchema fields = 1; +} + +// A field in TableSchema +message TableFieldSchema { + enum Type { + // Illegal value + TYPE_UNSPECIFIED = 0; + + // 64K, UTF8 + STRING = 1; + + // 64-bit signed + INT64 = 2; + + // 64-bit IEEE floating point + DOUBLE = 3; + + // Aggregate type + STRUCT = 4; + + // 64K, Binary + BYTES = 5; + + // 2-valued + BOOL = 6; + + // 64-bit signed usec since UTC epoch + TIMESTAMP = 7; + + // Civil date - Year, Month, Day + DATE = 8; + + // Civil time - Hour, Minute, Second, Microseconds + TIME = 9; + + // Combination of civil date and civil time + DATETIME = 10; + + // Geography object (go/googlesql_geography) + GEOGRAPHY = 11; + + // Numeric value (go/googlesql_numeric) + NUMERIC = 12; + } + + enum Mode { + // Illegal value + MODE_UNSPECIFIED = 0; + + NULLABLE = 1; + + REQUIRED = 2; + + REPEATED = 3; + } + + // Required. The field name. The name must contain only letters (a-z, A-Z), + // numbers (0-9), or underscores (_), and must start with a letter or + // underscore. The maximum length is 128 characters. + string name = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. The field data type. + Type type = 2 [(google.api.field_behavior) = REQUIRED]; + + // Optional. The field mode. The default value is NULLABLE. + Mode mode = 3 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Describes the nested schema fields if the type property is set to STRUCT. + repeated TableFieldSchema fields = 4 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. The field description. The maximum length is 1,024 characters. + string description = 6 [(google.api.field_behavior) = OPTIONAL]; +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/pom.xml b/proto-google-cloud-bigquerystorage-v1beta2/pom.xml new file mode 100644 index 0000000000..a4a66794ca --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/pom.xml @@ -0,0 +1,25 @@ + + 4.0.0 + com.google.api.grpc + proto-google-cloud-bigquerystorage-v1beta2 + 0.85.2-SNAPSHOT + proto-google-cloud-bigquerystorage-v1beta2 + PROTO library for proto-google-cloud-bigquerystorage-v1beta2 + + com.google.cloud + google-cloud-bigquerystorage-parent + 0.120.2-beta-SNAPSHOT + + + + com.google.protobuf + protobuf-java + + + com.google.api.grpc + proto-google-common-protos + + + \ No newline at end of file diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowProto.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowProto.java new file mode 100644 index 0000000000..3899501267 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowProto.java @@ -0,0 +1,79 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/cloud/bigquery/storage/v1beta2/arrow.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +public final class ArrowProto { + private ArrowProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSchema_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSchema_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_ArrowRecordBatch_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_ArrowRecordBatch_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n1google/cloud/bigquery/storage/v1beta2/" + + "arrow.proto\022%google.cloud.bigquery.stora" + + "ge.v1beta2\"(\n\013ArrowSchema\022\031\n\021serialized_" + + "schema\030\001 \001(\014\"3\n\020ArrowRecordBatch\022\037\n\027seri" + + "alized_record_batch\030\001 \001(\014B\207\001\n)com.google" + + ".cloud.bigquery.storage.v1beta2B\nArrowPr" + + "otoP\001ZLgoogle.golang.org/genproto/google" + + "apis/cloud/bigquery/storage/v1beta2;stor" + + "ageb\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] {}); + internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSchema_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSchema_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSchema_descriptor, + new java.lang.String[] { + "SerializedSchema", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_ArrowRecordBatch_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_bigquery_storage_v1beta2_ArrowRecordBatch_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_ArrowRecordBatch_descriptor, + new java.lang.String[] { + "SerializedRecordBatch", + }); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowRecordBatch.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowRecordBatch.java new file mode 100644 index 0000000000..3849749889 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowRecordBatch.java @@ -0,0 +1,551 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/arrow.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Arrow RecordBatch.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch} + */ +public final class ArrowRecordBatch extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) + ArrowRecordBatchOrBuilder { + private static final long serialVersionUID = 0L; + // Use ArrowRecordBatch.newBuilder() to construct. + private ArrowRecordBatch(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ArrowRecordBatch() { + serializedRecordBatch_ = com.google.protobuf.ByteString.EMPTY; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ArrowRecordBatch(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private ArrowRecordBatch( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + serializedRecordBatch_ = input.readBytes(); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ArrowRecordBatch_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ArrowRecordBatch_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.class, + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.Builder.class); + } + + public static final int SERIALIZED_RECORD_BATCH_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString serializedRecordBatch_; + /** + * + * + *
+   * IPC-serialized Arrow RecordBatch.
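+   * These bytes are typically deserialized with an Arrow IPC reader, together with the ArrowSchema reported for the read session.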
+   * 
+ * + * bytes serialized_record_batch = 1; + * + * @return The serializedRecordBatch. + */ + public com.google.protobuf.ByteString getSerializedRecordBatch() { + return serializedRecordBatch_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!serializedRecordBatch_.isEmpty()) { + output.writeBytes(1, serializedRecordBatch_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!serializedRecordBatch_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(1, serializedRecordBatch_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch other = + (com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) obj; + + if (!getSerializedRecordBatch().equals(other.getSerializedRecordBatch())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SERIALIZED_RECORD_BATCH_FIELD_NUMBER; + hash = (53 * hash) + getSerializedRecordBatch().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, 
extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Arrow RecordBatch.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatchOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ArrowRecordBatch_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ArrowRecordBatch_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.class, + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + serializedRecordBatch_ = com.google.protobuf.ByteString.EMPTY; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ArrowRecordBatch_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch build() { + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch result = + new com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch(this); + result.serializedRecordBatch_ = serializedRecordBatch_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + 
public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch other) { + if (other == com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.getDefaultInstance()) + return this; + if (other.getSerializedRecordBatch() != com.google.protobuf.ByteString.EMPTY) { + setSerializedRecordBatch(other.getSerializedRecordBatch()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private com.google.protobuf.ByteString serializedRecordBatch_ = + com.google.protobuf.ByteString.EMPTY; + /** + * + * + *
+     * IPC-serialized Arrow RecordBatch.
+     * 
+ * + * bytes serialized_record_batch = 1; + * + * @return The serializedRecordBatch. + */ + public com.google.protobuf.ByteString getSerializedRecordBatch() { + return serializedRecordBatch_; + } + /** + * + * + *
+     * IPC-serialized Arrow RecordBatch.
+     * 
+ * + * bytes serialized_record_batch = 1; + * + * @param value The serializedRecordBatch to set. + * @return This builder for chaining. + */ + public Builder setSerializedRecordBatch(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + + serializedRecordBatch_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * IPC-serialized Arrow RecordBatch.
+     * 
+ * + * bytes serialized_record_batch = 1; + * + * @return This builder for chaining. + */ + public Builder clearSerializedRecordBatch() { + + serializedRecordBatch_ = getDefaultInstance().getSerializedRecordBatch(); + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) + private static final com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ArrowRecordBatch parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ArrowRecordBatch(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowRecordBatchOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowRecordBatchOrBuilder.java new file mode 100644 index 0000000000..925d8960ed --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowRecordBatchOrBuilder.java @@ -0,0 +1,38 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/arrow.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +public interface ArrowRecordBatchOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * IPC-serialized Arrow RecordBatch.
+   * 
+ * + * bytes serialized_record_batch = 1; + * + * @return The serializedRecordBatch. + */ + com.google.protobuf.ByteString getSerializedRecordBatch(); +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSchema.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSchema.java new file mode 100644 index 0000000000..a97bf1b418 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSchema.java @@ -0,0 +1,558 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/arrow.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Arrow schema as specified in
+ * https://arrow.apache.org/docs/python/api/datatypes.html
+ * and serialized to bytes using IPC:
+ * https://arrow.apache.org/docs/ipc.html.
+ * See code samples on how this message can be deserialized.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ArrowSchema} + */ +public final class ArrowSchema extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.ArrowSchema) + ArrowSchemaOrBuilder { + private static final long serialVersionUID = 0L; + // Use ArrowSchema.newBuilder() to construct. + private ArrowSchema(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ArrowSchema() { + serializedSchema_ = com.google.protobuf.ByteString.EMPTY; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ArrowSchema(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private ArrowSchema( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + serializedSchema_ = input.readBytes(); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.class, + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.Builder.class); + } + + public static final int SERIALIZED_SCHEMA_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString serializedSchema_; + /** + * + * + *
+   * IPC serialized Arrow schema.
+   * 
+ * + * bytes serialized_schema = 1; + * + * @return The serializedSchema. + */ + public com.google.protobuf.ByteString getSerializedSchema() { + return serializedSchema_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!serializedSchema_.isEmpty()) { + output.writeBytes(1, serializedSchema_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!serializedSchema_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(1, serializedSchema_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.ArrowSchema)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema other = + (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) obj; + + if (!getSerializedSchema().equals(other.getSerializedSchema())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SERIALIZED_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getSerializedSchema().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSchema parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSchema parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSchema parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSchema parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSchema parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSchema parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSchema parseFrom( + 
java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSchema parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSchema parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSchema parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSchema parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSchema parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Arrow schema as specified in
+   * https://arrow.apache.org/docs/python/api/datatypes.html
+   * and serialized to bytes using IPC:
+   * https://arrow.apache.org/docs/ipc.html.
+   * See code samples on how this message can be deserialized.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ArrowSchema} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.ArrowSchema) + com.google.cloud.bigquery.storage.v1beta2.ArrowSchemaOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.class, + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + serializedSchema_ = com.google.protobuf.ByteString.EMPTY; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ArrowSchema_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowSchema getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowSchema build() { + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowSchema buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema result = + new com.google.cloud.bigquery.storage.v1beta2.ArrowSchema(this); + result.serializedSchema_ = serializedSchema_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, 
java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.ArrowSchema other) { + if (other == com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance()) + return this; + if (other.getSerializedSchema() != com.google.protobuf.ByteString.EMPTY) { + setSerializedSchema(other.getSerializedSchema()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private com.google.protobuf.ByteString serializedSchema_ = com.google.protobuf.ByteString.EMPTY; + /** + * + * + *
+     * IPC serialized Arrow schema.
+     * 
+ * + * bytes serialized_schema = 1; + * + * @return The serializedSchema. + */ + public com.google.protobuf.ByteString getSerializedSchema() { + return serializedSchema_; + } + /** + * + * + *
+     * IPC serialized Arrow schema.
+     * 
+ * + * bytes serialized_schema = 1; + * + * @param value The serializedSchema to set. + * @return This builder for chaining. + */ + public Builder setSerializedSchema(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + + serializedSchema_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * IPC serialized Arrow schema.
+     * 
+ * + * bytes serialized_schema = 1; + * + * @return This builder for chaining. + */ + public Builder clearSerializedSchema() { + + serializedSchema_ = getDefaultInstance().getSerializedSchema(); + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.ArrowSchema) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.ArrowSchema) + private static final com.google.cloud.bigquery.storage.v1beta2.ArrowSchema DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.ArrowSchema(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ArrowSchema getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ArrowSchema parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ArrowSchema(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowSchema getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSchemaOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSchemaOrBuilder.java new file mode 100644 index 0000000000..0efaa1b6b1 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSchemaOrBuilder.java @@ -0,0 +1,38 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/arrow.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +public interface ArrowSchemaOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.ArrowSchema) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * IPC serialized Arrow schema.
+   *
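The serialized_schema bytes carry an Arrow IPC-encoded schema; the generated message only wraps them in a protobuf ByteString and does no decoding of its own. A minimal sketch of building and reading the message through the builder API added in this patch; the byte array here is a placeholder, and an actual Arrow IPC reader would be needed to interpret the payload:

    import com.google.cloud.bigquery.storage.v1beta2.ArrowSchema;
    import com.google.protobuf.ByteString;

    public class ArrowSchemaSketch {
      public static void main(String[] args) {
        // Placeholder payload; in practice these bytes arrive from the server
        // and contain an Arrow IPC-serialized schema.
        byte[] ipcBytes = new byte[] {0x01, 0x02, 0x03};

        // Wrap the raw bytes with the generated builder.
        ArrowSchema arrowSchema =
            ArrowSchema.newBuilder().setSerializedSchema(ByteString.copyFrom(ipcBytes)).build();

        // Read them back out; pass this ByteString to an Arrow IPC reader to decode it.
        ByteString serialized = arrowSchema.getSerializedSchema();
        System.out.println("serialized Arrow schema is " + serialized.size() + " bytes");
      }
    }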
+ * + * bytes serialized_schema = 1; + * + * @return The serializedSchema. + */ + com.google.protobuf.ByteString getSerializedSchema(); +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroProto.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroProto.java new file mode 100644 index 0000000000..799ef8fad4 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroProto.java @@ -0,0 +1,78 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/avro.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +public final class AvroProto { + private AvroProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_AvroSchema_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_AvroSchema_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_AvroRows_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_AvroRows_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n0google/cloud/bigquery/storage/v1beta2/" + + "avro.proto\022%google.cloud.bigquery.storag" + + "e.v1beta2\"\034\n\nAvroSchema\022\016\n\006schema\030\001 \001(\t\"" + + "*\n\010AvroRows\022\036\n\026serialized_binary_rows\030\001 " + + "\001(\014B\206\001\n)com.google.cloud.bigquery.storag" + + "e.v1beta2B\tAvroProtoP\001ZLgoogle.golang.or" + + "g/genproto/googleapis/cloud/bigquery/sto" + + "rage/v1beta2;storageb\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] {}); + internal_static_google_cloud_bigquery_storage_v1beta2_AvroSchema_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1beta2_AvroSchema_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_AvroSchema_descriptor, + new java.lang.String[] { + "Schema", + }); + 
internal_static_google_cloud_bigquery_storage_v1beta2_AvroRows_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_bigquery_storage_v1beta2_AvroRows_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_AvroRows_descriptor, + new java.lang.String[] { + "SerializedBinaryRows", + }); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroRows.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroRows.java new file mode 100644 index 0000000000..0caeefe574 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroRows.java @@ -0,0 +1,550 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/avro.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Avro rows.
+ *
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.AvroRows} + */ +public final class AvroRows extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.AvroRows) + AvroRowsOrBuilder { + private static final long serialVersionUID = 0L; + // Use AvroRows.newBuilder() to construct. + private AvroRows(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private AvroRows() { + serializedBinaryRows_ = com.google.protobuf.ByteString.EMPTY; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new AvroRows(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private AvroRows( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + serializedBinaryRows_ = input.readBytes(); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AvroRows_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AvroRows_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.AvroRows.class, + com.google.cloud.bigquery.storage.v1beta2.AvroRows.Builder.class); + } + + public static final int SERIALIZED_BINARY_ROWS_FIELD_NUMBER = 1; + private com.google.protobuf.ByteString serializedBinaryRows_; + /** + * + * + *
+   * Binary serialized rows in a block.
+   *
+ * + * bytes serialized_binary_rows = 1; + * + * @return The serializedBinaryRows. + */ + public com.google.protobuf.ByteString getSerializedBinaryRows() { + return serializedBinaryRows_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!serializedBinaryRows_.isEmpty()) { + output.writeBytes(1, serializedBinaryRows_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!serializedBinaryRows_.isEmpty()) { + size += com.google.protobuf.CodedOutputStream.computeBytesSize(1, serializedBinaryRows_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.AvroRows)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.AvroRows other = + (com.google.cloud.bigquery.storage.v1beta2.AvroRows) obj; + + if (!getSerializedBinaryRows().equals(other.getSerializedBinaryRows())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SERIALIZED_BINARY_ROWS_FIELD_NUMBER; + hash = (53 * hash) + getSerializedBinaryRows().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroRows parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroRows parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroRows parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroRows parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroRows parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroRows parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroRows 
parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroRows parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroRows parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroRows parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroRows parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroRows parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1beta2.AvroRows prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Avro rows.
+   *
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.AvroRows} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.AvroRows) + com.google.cloud.bigquery.storage.v1beta2.AvroRowsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AvroRows_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AvroRows_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.AvroRows.class, + com.google.cloud.bigquery.storage.v1beta2.AvroRows.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.AvroRows.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + serializedBinaryRows_ = com.google.protobuf.ByteString.EMPTY; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AvroRows_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroRows getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.AvroRows.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroRows build() { + com.google.cloud.bigquery.storage.v1beta2.AvroRows result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroRows buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.AvroRows result = + new com.google.cloud.bigquery.storage.v1beta2.AvroRows(this); + result.serializedBinaryRows_ = serializedBinaryRows_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return 
super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.AvroRows) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.AvroRows) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.AvroRows other) { + if (other == com.google.cloud.bigquery.storage.v1beta2.AvroRows.getDefaultInstance()) + return this; + if (other.getSerializedBinaryRows() != com.google.protobuf.ByteString.EMPTY) { + setSerializedBinaryRows(other.getSerializedBinaryRows()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1beta2.AvroRows parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1beta2.AvroRows) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private com.google.protobuf.ByteString serializedBinaryRows_ = + com.google.protobuf.ByteString.EMPTY; + /** + * + * + *
+     * Binary serialized rows in a block.
+     *
+ * + * bytes serialized_binary_rows = 1; + * + * @return The serializedBinaryRows. + */ + public com.google.protobuf.ByteString getSerializedBinaryRows() { + return serializedBinaryRows_; + } + /** + * + * + *
+     * Binary serialized rows in a block.
+     *
+ * + * bytes serialized_binary_rows = 1; + * + * @param value The serializedBinaryRows to set. + * @return This builder for chaining. + */ + public Builder setSerializedBinaryRows(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + + serializedBinaryRows_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Binary serialized rows in a block.
+     *
+ * + * bytes serialized_binary_rows = 1; + * + * @return This builder for chaining. + */ + public Builder clearSerializedBinaryRows() { + + serializedBinaryRows_ = getDefaultInstance().getSerializedBinaryRows(); + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.AvroRows) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.AvroRows) + private static final com.google.cloud.bigquery.storage.v1beta2.AvroRows DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.AvroRows(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroRows getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AvroRows parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AvroRows(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroRows getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroRowsOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroRowsOrBuilder.java new file mode 100644 index 0000000000..04aefb5227 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroRowsOrBuilder.java @@ -0,0 +1,38 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/avro.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +public interface AvroRowsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.AvroRows) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Binary serialized rows in a block.
+   *
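For the read path, each AvroRows block is a run of Avro binary-encoded records that matches the session's AvroSchema. A minimal sketch of decoding one block, assuming the Apache Avro library is on the classpath; the helper class name is illustrative and the schema JSON would come from AvroSchema.getSchema():

    import com.google.protobuf.ByteString;
    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.avro.Schema;
    import org.apache.avro.generic.GenericDatumReader;
    import org.apache.avro.generic.GenericRecord;
    import org.apache.avro.io.BinaryDecoder;
    import org.apache.avro.io.DecoderFactory;

    public class AvroRowsDecoder {
      /** Decodes one serialized_binary_rows block using the session's JSON Avro schema. */
      public static List<GenericRecord> decode(String schemaJson, ByteString serializedBinaryRows)
          throws IOException {
        Schema schema = new Schema.Parser().parse(schemaJson);
        GenericDatumReader<GenericRecord> reader = new GenericDatumReader<>(schema);
        BinaryDecoder decoder =
            DecoderFactory.get().binaryDecoder(serializedBinaryRows.toByteArray(), /* reuse= */ null);

        List<GenericRecord> rows = new ArrayList<>();
        // Records are concatenated back to back; keep reading until the block is exhausted.
        while (!decoder.isEnd()) {
          rows.add(reader.read(/* reuse= */ null, decoder));
        }
        return rows;
      }
    }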
+ * + * bytes serialized_binary_rows = 1; + * + * @return The serializedBinaryRows. + */ + com.google.protobuf.ByteString getSerializedBinaryRows(); +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroSchema.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroSchema.java new file mode 100644 index 0000000000..90a5e07767 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroSchema.java @@ -0,0 +1,641 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/avro.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Avro schema.
+ *
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.AvroSchema} + */ +public final class AvroSchema extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.AvroSchema) + AvroSchemaOrBuilder { + private static final long serialVersionUID = 0L; + // Use AvroSchema.newBuilder() to construct. + private AvroSchema(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private AvroSchema() { + schema_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new AvroSchema(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private AvroSchema( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + schema_ = s; + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AvroSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AvroSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.class, + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.Builder.class); + } + + public static final int SCHEMA_FIELD_NUMBER = 1; + private volatile java.lang.Object schema_; + /** + * + * + *
+   * Json serialized schema, as described at
+   * https://avro.apache.org/docs/1.8.1/spec.html.
+   *
+ * + * string schema = 1; + * + * @return The schema. + */ + public java.lang.String getSchema() { + java.lang.Object ref = schema_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + schema_ = s; + return s; + } + } + /** + * + * + *
+   * Json serialized schema, as described at
+   * https://avro.apache.org/docs/1.8.1/spec.html.
+   *
+ * + * string schema = 1; + * + * @return The bytes for schema. + */ + public com.google.protobuf.ByteString getSchemaBytes() { + java.lang.Object ref = schema_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + schema_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!getSchemaBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, schema_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getSchemaBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, schema_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.AvroSchema)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.AvroSchema other = + (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) obj; + + if (!getSchema().equals(other.getSchema())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getSchema().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroSchema parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroSchema parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroSchema parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroSchema parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroSchema parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroSchema parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroSchema parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroSchema parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroSchema parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroSchema parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroSchema parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroSchema parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1beta2.AvroSchema prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Avro schema.
+   *
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.AvroSchema} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.AvroSchema) + com.google.cloud.bigquery.storage.v1beta2.AvroSchemaOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AvroSchema_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AvroSchema_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.class, + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.AvroSchema.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + schema_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.AvroProto + .internal_static_google_cloud_bigquery_storage_v1beta2_AvroSchema_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroSchema getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroSchema build() { + com.google.cloud.bigquery.storage.v1beta2.AvroSchema result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroSchema buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.AvroSchema result = + new com.google.cloud.bigquery.storage.v1beta2.AvroSchema(this); + result.schema_ = schema_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + 
@java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.AvroSchema) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.AvroSchema) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.AvroSchema other) { + if (other == com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance()) + return this; + if (!other.getSchema().isEmpty()) { + schema_ = other.schema_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1beta2.AvroSchema parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object schema_ = ""; + /** + * + * + *
+     * Json serialized schema, as described at
+     * https://avro.apache.org/docs/1.8.1/spec.html.
+     *
+ * + * string schema = 1; + * + * @return The schema. + */ + public java.lang.String getSchema() { + java.lang.Object ref = schema_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + schema_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Json serialized schema, as described at
+     * https://avro.apache.org/docs/1.8.1/spec.html.
+     *
+ * + * string schema = 1; + * + * @return The bytes for schema. + */ + public com.google.protobuf.ByteString getSchemaBytes() { + java.lang.Object ref = schema_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + schema_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Json serialized schema, as described at
+     * https://avro.apache.org/docs/1.8.1/spec.html.
+     *
+ * + * string schema = 1; + * + * @param value The schema to set. + * @return This builder for chaining. + */ + public Builder setSchema(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + schema_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Json serialized schema, as described at
+     * https://avro.apache.org/docs/1.8.1/spec.html.
+     *
+ * + * string schema = 1; + * + * @return This builder for chaining. + */ + public Builder clearSchema() { + + schema_ = getDefaultInstance().getSchema(); + onChanged(); + return this; + } + /** + * + * + *
+     * Json serialized schema, as described at
+     * https://avro.apache.org/docs/1.8.1/spec.html.
+     *
+ * + * string schema = 1; + * + * @param value The bytes for schema to set. + * @return This builder for chaining. + */ + public Builder setSchemaBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + schema_ = value; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.AvroSchema) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.AvroSchema) + private static final com.google.cloud.bigquery.storage.v1beta2.AvroSchema DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.AvroSchema(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.AvroSchema getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public AvroSchema parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AvroSchema(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroSchema getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroSchemaOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroSchemaOrBuilder.java new file mode 100644 index 0000000000..03ac8890e5 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroSchemaOrBuilder.java @@ -0,0 +1,52 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/avro.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +public interface AvroSchemaOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.AvroSchema) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Json serialized schema, as described at
+   * https://avro.apache.org/docs/1.8.1/spec.html.
+   *
+ * + * string schema = 1; + * + * @return The schema. + */ + java.lang.String getSchema(); + /** + * + * + *
+   * Json serialized schema, as described at
+   * https://avro.apache.org/docs/1.8.1/spec.html.
+   *
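Since the schema string returned by getSchema() is ordinary Avro JSON, it can be handed straight to Avro's schema parser for inspection; a short sketch, again assuming the Apache Avro library is available, with a made-up schema standing in for the real value:

    import org.apache.avro.Schema;

    public class AvroSchemaInspect {
      public static void main(String[] args) {
        // Illustrative schema JSON; in practice use the value of AvroSchema.getSchema().
        String schemaJson =
            "{\"type\":\"record\",\"name\":\"Row\","
                + "\"fields\":[{\"name\":\"word\",\"type\":\"string\"}]}";

        Schema schema = new Schema.Parser().parse(schemaJson);
        for (Schema.Field field : schema.getFields()) {
          System.out.println(field.name() + ": " + field.schema().getType());
        }
      }
    }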
+ * + * string schema = 1; + * + * @return The bytes for schema. + */ + com.google.protobuf.ByteString getSchemaBytes(); +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateReadSessionRequest.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateReadSessionRequest.java new file mode 100644 index 0000000000..94fc772165 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateReadSessionRequest.java @@ -0,0 +1,1080 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Request message for `CreateReadSession`.
+ *
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest} + */ +public final class CreateReadSessionRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest) + CreateReadSessionRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use CreateReadSessionRequest.newBuilder() to construct. + private CreateReadSessionRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private CreateReadSessionRequest() { + parent_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new CreateReadSessionRequest(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private CreateReadSessionRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + parent_ = s; + break; + } + case 18: + { + com.google.cloud.bigquery.storage.v1beta2.ReadSession.Builder subBuilder = null; + if (readSession_ != null) { + subBuilder = readSession_.toBuilder(); + } + readSession_ = + input.readMessage( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(readSession_); + readSession_ = subBuilder.buildPartial(); + } + + break; + } + case 24: + { + maxStreamCount_ = input.readInt32(); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_CreateReadSessionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_CreateReadSessionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest.class, + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest.Builder.class); + } + + public static final int PARENT_FIELD_NUMBER = 1; + private volatile java.lang.Object parent_; + /** + * + * + *
+   * Required. The request project that owns the session, in the form of
+   * `projects/{project_id}`.
+   *
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } + } + /** + * + * + *
+   * Required. The request project that owns the session, in the form of
+   * `projects/{project_id}`.
+   *
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int READ_SESSION_FIELD_NUMBER = 2; + private com.google.cloud.bigquery.storage.v1beta2.ReadSession readSession_; + /** + * + * + *
+   * Required. Session to be created.
+   *
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the readSession field is set. + */ + public boolean hasReadSession() { + return readSession_ != null; + } + /** + * + * + *
+   * Required. Session to be created.
+   *
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The readSession. + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadSession getReadSession() { + return readSession_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadSession.getDefaultInstance() + : readSession_; + } + /** + * + * + *
+   * Required. Session to be created.
+   *
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadSessionOrBuilder getReadSessionOrBuilder() { + return getReadSession(); + } + + public static final int MAX_STREAM_COUNT_FIELD_NUMBER = 3; + private int maxStreamCount_; + /** + * + * + *
+   * Max initial number of streams. If unset or zero, the server will
+   * provide a value of streams so as to produce reasonable throughput. Must be
+   * non-negative. The number of streams may be lower than the requested number,
+   * depending on the amount of parallelism that is reasonable for the table. Error
+   * will be returned if the max count is greater than the current system
+   * max limit of 1,000.
+   * Streams must be read starting from offset 0.
+   *
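Taken together, parent, read_session, and max_stream_count are all that a caller needs to set on this message. A minimal sketch of assembling the request with the generated builders; the ReadSession setters used inside (setTable, setDataFormat) are assumptions based on the accompanying stream.proto rather than code shown in this excerpt, and the identifiers are placeholders:

    import com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest;
    import com.google.cloud.bigquery.storage.v1beta2.DataFormat;
    import com.google.cloud.bigquery.storage.v1beta2.ReadSession;

    public class CreateReadSessionRequestSketch {
      public static void main(String[] args) {
        // Placeholder identifiers; substitute a real project and table path.
        String parent = "projects/my-project";
        String table = "projects/my-project/datasets/my_dataset/tables/my_table";

        ReadSession session =
            ReadSession.newBuilder()
                .setTable(table) // assumed ReadSession field, see stream.proto
                .setDataFormat(DataFormat.AVRO) // assumed enum value, see stream.proto
                .build();

        CreateReadSessionRequest request =
            CreateReadSessionRequest.newBuilder()
                .setParent(parent) // required: project that owns the session
                .setReadSession(session) // required: session to be created
                .setMaxStreamCount(1) // zero lets the server choose the stream count
                .build();

        System.out.println(request);
      }
    }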
+ * + * int32 max_stream_count = 3; + * + * @return The maxStreamCount. + */ + public int getMaxStreamCount() { + return maxStreamCount_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!getParentBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); + } + if (readSession_ != null) { + output.writeMessage(2, getReadSession()); + } + if (maxStreamCount_ != 0) { + output.writeInt32(3, maxStreamCount_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getParentBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); + } + if (readSession_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getReadSession()); + } + if (maxStreamCount_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(3, maxStreamCount_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest other = + (com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest) obj; + + if (!getParent().equals(other.getParent())) return false; + if (hasReadSession() != other.hasReadSession()) return false; + if (hasReadSession()) { + if (!getReadSession().equals(other.getReadSession())) return false; + } + if (getMaxStreamCount() != other.getMaxStreamCount()) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + PARENT_FIELD_NUMBER; + hash = (53 * hash) + getParent().hashCode(); + if (hasReadSession()) { + hash = (37 * hash) + READ_SESSION_FIELD_NUMBER; + hash = (53 * hash) + getReadSession().hashCode(); + } + hash = (37 * hash) + MAX_STREAM_COUNT_FIELD_NUMBER; + hash = (53 * hash) + getMaxStreamCount(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Request message for `CreateReadSession`.
+   * 
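+   *
+   * <p>A minimal, hypothetical usage sketch of this builder (the project id and
+   * stream count below are illustrative values only, not part of the generated file):
+   * <pre>
+   * CreateReadSessionRequest request =
+   *     CreateReadSessionRequest.newBuilder()
+   *         .setParent("projects/my-project")
+   *         .setReadSession(ReadSession.getDefaultInstance())
+   *         .setMaxStreamCount(10)
+   *         .build();
+   * </pre>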
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest) + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_CreateReadSessionRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_CreateReadSessionRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest.class, + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + parent_ = ""; + + if (readSessionBuilder_ == null) { + readSession_ = null; + } else { + readSession_ = null; + readSessionBuilder_ = null; + } + maxStreamCount_ = 0; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_CreateReadSessionRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest build() { + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest result = + new com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest(this); + result.parent_ = parent_; + if (readSessionBuilder_ == null) { + result.readSession_ = readSession_; + } else { + result.readSession_ = readSessionBuilder_.build(); + } + result.maxStreamCount_ = maxStreamCount_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return 
super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest + .getDefaultInstance()) return this; + if (!other.getParent().isEmpty()) { + parent_ = other.parent_; + onChanged(); + } + if (other.hasReadSession()) { + mergeReadSession(other.getReadSession()); + } + if (other.getMaxStreamCount() != 0) { + setMaxStreamCount(other.getMaxStreamCount()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object parent_ = ""; + /** + * + * + *
+     * Required. The request project that owns the session, in the form of
+     * `projects/{project_id}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + public java.lang.String getParent() { + java.lang.Object ref = parent_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + parent_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. The request project that owns the session, in the form of
+     * `projects/{project_id}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + public com.google.protobuf.ByteString getParentBytes() { + java.lang.Object ref = parent_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + parent_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. The request project that owns the session, in the form of
+     * `projects/{project_id}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The parent to set. + * @return This builder for chaining. + */ + public Builder setParent(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + parent_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The request project that owns the session, in the form of
+     * `projects/{project_id}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearParent() { + + parent_ = getDefaultInstance().getParent(); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. The request project that owns the session, in the form of
+     * `projects/{project_id}`.
+     * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for parent to set. + * @return This builder for chaining. + */ + public Builder setParentBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + parent_ = value; + onChanged(); + return this; + } + + private com.google.cloud.bigquery.storage.v1beta2.ReadSession readSession_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadSession, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadSessionOrBuilder> + readSessionBuilder_; + /** + * + * + *
+     * Required. Session to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the readSession field is set. + */ + public boolean hasReadSession() { + return readSessionBuilder_ != null || readSession_ != null; + } + /** + * + * + *
+     * Required. Session to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The readSession. + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadSession getReadSession() { + if (readSessionBuilder_ == null) { + return readSession_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadSession.getDefaultInstance() + : readSession_; + } else { + return readSessionBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Required. Session to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setReadSession(com.google.cloud.bigquery.storage.v1beta2.ReadSession value) { + if (readSessionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + readSession_ = value; + onChanged(); + } else { + readSessionBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Required. Session to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder setReadSession( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.Builder builderForValue) { + if (readSessionBuilder_ == null) { + readSession_ = builderForValue.build(); + onChanged(); + } else { + readSessionBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Required. Session to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder mergeReadSession(com.google.cloud.bigquery.storage.v1beta2.ReadSession value) { + if (readSessionBuilder_ == null) { + if (readSession_ != null) { + readSession_ = + com.google.cloud.bigquery.storage.v1beta2.ReadSession.newBuilder(readSession_) + .mergeFrom(value) + .buildPartial(); + } else { + readSession_ = value; + } + onChanged(); + } else { + readSessionBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Required. Session to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public Builder clearReadSession() { + if (readSessionBuilder_ == null) { + readSession_ = null; + onChanged(); + } else { + readSession_ = null; + readSessionBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Required. Session to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.Builder getReadSessionBuilder() { + + onChanged(); + return getReadSessionFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Required. Session to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadSessionOrBuilder + getReadSessionOrBuilder() { + if (readSessionBuilder_ != null) { + return readSessionBuilder_.getMessageOrBuilder(); + } else { + return readSession_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadSession.getDefaultInstance() + : readSession_; + } + } + /** + * + * + *
+     * Required. Session to be created.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadSession, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadSessionOrBuilder> + getReadSessionFieldBuilder() { + if (readSessionBuilder_ == null) { + readSessionBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadSession, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadSessionOrBuilder>( + getReadSession(), getParentForChildren(), isClean()); + readSession_ = null; + } + return readSessionBuilder_; + } + + private int maxStreamCount_; + /** + * + * + *
+     * Max initial number of streams. If unset or zero, the server will
+     * provide a value of streams so as to produce reasonable throughput. Must be
+     * non-negative. The number of streams may be lower than the requested number,
+     * depending on the amount of parallelism that is reasonable for the table. Error
+     * will be returned if the max count is greater than the current system
+     * max limit of 1,000.
+     * Streams must be read starting from offset 0.
+     * 
+ * + * int32 max_stream_count = 3; + * + * @return The maxStreamCount. + */ + public int getMaxStreamCount() { + return maxStreamCount_; + } + /** + * + * + *
+     * Max initial number of streams. If unset or zero, the server will
+     * provide a value of streams so as to produce reasonable throughput. Must be
+     * non-negative. The number of streams may be lower than the requested number,
+     * depending on the amount of parallelism that is reasonable for the table. Error
+     * will be returned if the max count is greater than the current system
+     * max limit of 1,000.
+     * Streams must be read starting from offset 0.
+     * 
+ * + * int32 max_stream_count = 3; + * + * @param value The maxStreamCount to set. + * @return This builder for chaining. + */ + public Builder setMaxStreamCount(int value) { + + maxStreamCount_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Max initial number of streams. If unset or zero, the server will
+     * provide a value of streams so as to produce reasonable throughput. Must be
+     * non-negative. The number of streams may be lower than the requested number,
+     * depending on the amount of parallelism that is reasonable for the table. Error
+     * will be returned if the max count is greater than the current system
+     * max limit of 1,000.
+     * Streams must be read starting from offset 0.
+     * 
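+     *
+     * <p>Clearing resets the field to 0, which asks the server to pick the stream
+     * count itself; a hypothetical illustration (not part of the generated file):
+     * <pre>
+     * CreateReadSessionRequest.Builder builder =
+     *     CreateReadSessionRequest.newBuilder().setMaxStreamCount(500);
+     * builder.clearMaxStreamCount();  // back to 0: the server chooses the count
+     * </pre>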
+ * + * int32 max_stream_count = 3; + * + * @return This builder for chaining. + */ + public Builder clearMaxStreamCount() { + + maxStreamCount_ = 0; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest) + private static final com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public CreateReadSessionRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new CreateReadSessionRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateReadSessionRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateReadSessionRequestOrBuilder.java new file mode 100644 index 0000000000..3ce93c86fd --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateReadSessionRequestOrBuilder.java @@ -0,0 +1,116 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +public interface CreateReadSessionRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. The request project that owns the session, in the form of
+   * `projects/{project_id}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The parent. + */ + java.lang.String getParent(); + /** + * + * + *
+   * Required. The request project that owns the session, in the form of
+   * `projects/{project_id}`.
+   * 
+ * + * + * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for parent. + */ + com.google.protobuf.ByteString getParentBytes(); + + /** + * + * + *
+   * Required. Session to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return Whether the readSession field is set. + */ + boolean hasReadSession(); + /** + * + * + *
+   * Required. Session to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * + * @return The readSession. + */ + com.google.cloud.bigquery.storage.v1beta2.ReadSession getReadSession(); + /** + * + * + *
+   * Required. Session to be created.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession read_session = 2 [(.google.api.field_behavior) = REQUIRED]; + * + */ + com.google.cloud.bigquery.storage.v1beta2.ReadSessionOrBuilder getReadSessionOrBuilder(); + + /** + * + * + *
+   * Max initial number of streams. If unset or zero, the server will
+   * provide a value of streams so as to produce reasonable throughput. Must be
+   * non-negative. The number of streams may be lower than the requested number,
+   * depending on the amount of parallelism that is reasonable for the table. Error
+   * will be returned if the max count is greater than the current system
+   * max limit of 1,000.
+   * Streams must be read starting from offset 0.
+   * 
+ * + * int32 max_stream_count = 3; + * + * @return The maxStreamCount. + */ + int getMaxStreamCount(); +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/DataFormat.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/DataFormat.java new file mode 100644 index 0000000000..41b1be9cd9 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/DataFormat.java @@ -0,0 +1,162 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/stream.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Data format for input or output data.
+ * 
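+ *
+ * <p>A small, hypothetical illustration of the generated helpers defined below
+ * (not part of the generated file):
+ * <pre>
+ * DataFormat format = DataFormat.forNumber(2);   // returns ARROW
+ * int wireValue = DataFormat.AVRO.getNumber();   // returns 1
+ * </pre>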
+ * + * Protobuf enum {@code google.cloud.bigquery.storage.v1beta2.DataFormat} + */ +public enum DataFormat implements com.google.protobuf.ProtocolMessageEnum { + /** DATA_FORMAT_UNSPECIFIED = 0; */ + DATA_FORMAT_UNSPECIFIED(0), + /** + * + * + *
+   * Avro is a standard open source row-based file format.
+   * See https://avro.apache.org/ for more details.
+   * 
+ * + * AVRO = 1; + */ + AVRO(1), + /** + * + * + *
+   * Arrow is a standard open source column-based message format.
+   * See https://arrow.apache.org/ for more details.
+   * 
+ * + * ARROW = 2; + */ + ARROW(2), + UNRECOGNIZED(-1), + ; + + /** DATA_FORMAT_UNSPECIFIED = 0; */ + public static final int DATA_FORMAT_UNSPECIFIED_VALUE = 0; + /** + * + * + *
+   * Avro is a standard open source row-based file format.
+   * See https://avro.apache.org/ for more details.
+   * 
+ * + * AVRO = 1; + */ + public static final int AVRO_VALUE = 1; + /** + * + * + *
+   * Arrow is a standard open source column-based message format.
+   * See https://arrow.apache.org/ for more details.
+   * 
+ * + * ARROW = 2; + */ + public static final int ARROW_VALUE = 2; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static DataFormat valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static DataFormat forNumber(int value) { + switch (value) { + case 0: + return DATA_FORMAT_UNSPECIFIED; + case 1: + return AVRO; + case 2: + return ARROW; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public DataFormat findValueByNumber(int number) { + return DataFormat.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final DataFormat[] VALUES = values(); + + public static DataFormat valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private DataFormat(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1beta2.DataFormat) +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsRequest.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsRequest.java new file mode 100644 index 0000000000..fdbb036d09 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsRequest.java @@ -0,0 +1,731 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Request message for `ReadRows`.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ReadRowsRequest} + */ +public final class ReadRowsRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.ReadRowsRequest) + ReadRowsRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use ReadRowsRequest.newBuilder() to construct. + private ReadRowsRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ReadRowsRequest() { + readStream_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ReadRowsRequest(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private ReadRowsRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + readStream_ = s; + break; + } + case 16: + { + offset_ = input.readInt64(); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest.class, + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest.Builder.class); + } + + public static final int READ_STREAM_FIELD_NUMBER = 1; + private volatile java.lang.Object readStream_; + /** + * + * + *
+   * Required. Stream to read rows from.
+   * 
+ * + * string read_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The readStream. + */ + public java.lang.String getReadStream() { + java.lang.Object ref = readStream_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + readStream_ = s; + return s; + } + } + /** + * + * + *
+   * Required. Stream to read rows from.
+   * 
+ * + * string read_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for readStream. + */ + public com.google.protobuf.ByteString getReadStreamBytes() { + java.lang.Object ref = readStream_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + readStream_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int OFFSET_FIELD_NUMBER = 2; + private long offset_; + /** + * + * + *
+   * The offset requested must be less than the last row read from Read.
+   * Requesting a larger offset is undefined. If not specified, start reading
+   * from offset zero.
+   * 
+ * + * int64 offset = 2; + * + * @return The offset. + */ + public long getOffset() { + return offset_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!getReadStreamBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, readStream_); + } + if (offset_ != 0L) { + output.writeInt64(2, offset_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getReadStreamBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, readStream_); + } + if (offset_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(2, offset_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest other = + (com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest) obj; + + if (!getReadStream().equals(other.getReadStream())) return false; + if (getOffset() != other.getOffset()) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + READ_STREAM_FIELD_NUMBER; + hash = (53 * hash) + getReadStream().hashCode(); + hash = (37 * hash) + OFFSET_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getOffset()); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Request message for `ReadRows`.
+   * 
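+   *
+   * <p>A minimal, hypothetical usage sketch of this builder (the stream name is a
+   * made-up placeholder, not part of the generated file):
+   * <pre>
+   * ReadRowsRequest request =
+   *     ReadRowsRequest.newBuilder()
+   *         .setReadStream("my-read-stream-resource-name")
+   *         .setOffset(0L)
+   *         .build();
+   * </pre>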
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ReadRowsRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.ReadRowsRequest) + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest.class, + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + readStream_ = ""; + + offset_ = 0L; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest build() { + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest result = + new com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest(this); + result.readStream_ = readStream_; + result.offset_ = offset_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( 
+ com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest other) { + if (other == com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest.getDefaultInstance()) + return this; + if (!other.getReadStream().isEmpty()) { + readStream_ = other.readStream_; + onChanged(); + } + if (other.getOffset() != 0L) { + setOffset(other.getOffset()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object readStream_ = ""; + /** + * + * + *
+     * Required. Stream to read rows from.
+     * 
+ * + * string read_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The readStream. + */ + public java.lang.String getReadStream() { + java.lang.Object ref = readStream_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + readStream_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. Stream to read rows from.
+     * 
+ * + * string read_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for readStream. + */ + public com.google.protobuf.ByteString getReadStreamBytes() { + java.lang.Object ref = readStream_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + readStream_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. Stream to read rows from.
+     * 
+ * + * string read_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The readStream to set. + * @return This builder for chaining. + */ + public Builder setReadStream(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + readStream_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. Stream to read rows from.
+     * 
+ * + * string read_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearReadStream() { + + readStream_ = getDefaultInstance().getReadStream(); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. Stream to read rows from.
+     * 
+ * + * string read_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for readStream to set. + * @return This builder for chaining. + */ + public Builder setReadStreamBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + readStream_ = value; + onChanged(); + return this; + } + + private long offset_; + /** + * + * + *
+     * The offset requested must be less than the last row read from Read.
+     * Requesting a larger offset is undefined. If not specified, start reading
+     * from offset zero.
+     * 
+ * + * int64 offset = 2; + * + * @return The offset. + */ + public long getOffset() { + return offset_; + } + /** + * + * + *
+     * The offset requested must be less than the last row read from Read.
+     * Requesting a larger offset is undefined. If not specified, start reading
+     * from offset zero.
+     * 
+ * + * int64 offset = 2; + * + * @param value The offset to set. + * @return This builder for chaining. + */ + public Builder setOffset(long value) { + + offset_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * The offset requested must be less than the last row read from Read.
+     * Requesting a larger offset is undefined. If not specified, start reading
+     * from offset zero.
+     * 
+ * + * int64 offset = 2; + * + * @return This builder for chaining. + */ + public Builder clearOffset() { + + offset_ = 0L; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.ReadRowsRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.ReadRowsRequest) + private static final com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReadRowsRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ReadRowsRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsRequestOrBuilder.java new file mode 100644 index 0000000000..ae2d2dae02 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsRequestOrBuilder.java @@ -0,0 +1,65 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +public interface ReadRowsRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.ReadRowsRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Stream to read rows from.
+   * 
+ * + * string read_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The readStream. + */ + java.lang.String getReadStream(); + /** + * + * + *
+   * Required. Stream to read rows from.
+   * 
+ * + * string read_stream = 1 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for readStream. + */ + com.google.protobuf.ByteString getReadStreamBytes(); + + /** + * + * + *
+   * The offset requested must be less than the last row read from Read.
+   * Requesting a larger offset is undefined. If not specified, start reading
+   * from offset zero.
+   * 
+ * + * int64 offset = 2; + * + * @return The offset. + */ + long getOffset(); +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponse.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponse.java new file mode 100644 index 0000000000..cf53abc109 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponse.java @@ -0,0 +1,1798 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Response from calling `ReadRows` may include row data, progress and
+ * throttling information.
+ * 
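+ *
+ * <p>A hypothetical sketch of consuming the rows oneof with the accessors defined
+ * below (not part of the generated file):
+ * <pre>
+ * ReadRowsResponse response = ReadRowsResponse.getDefaultInstance();  // normally received from the server
+ * switch (response.getRowsCase()) {
+ *   case AVRO_ROWS:
+ *     AvroRows avroRows = response.getAvroRows();
+ *     break;
+ *   case ARROW_RECORD_BATCH:
+ *     ArrowRecordBatch batch = response.getArrowRecordBatch();
+ *     break;
+ *   default:
+ *     break;
+ * }
+ * </pre>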
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ReadRowsResponse} + */ +public final class ReadRowsResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.ReadRowsResponse) + ReadRowsResponseOrBuilder { + private static final long serialVersionUID = 0L; + // Use ReadRowsResponse.newBuilder() to construct. + private ReadRowsResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ReadRowsResponse() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ReadRowsResponse(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private ReadRowsResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 18: + { + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Builder subBuilder = null; + if (stats_ != null) { + subBuilder = stats_.toBuilder(); + } + stats_ = + input.readMessage( + com.google.cloud.bigquery.storage.v1beta2.StreamStats.parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(stats_); + stats_ = subBuilder.buildPartial(); + } + + break; + } + case 26: + { + com.google.cloud.bigquery.storage.v1beta2.AvroRows.Builder subBuilder = null; + if (rowsCase_ == 3) { + subBuilder = + ((com.google.cloud.bigquery.storage.v1beta2.AvroRows) rows_).toBuilder(); + } + rows_ = + input.readMessage( + com.google.cloud.bigquery.storage.v1beta2.AvroRows.parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom((com.google.cloud.bigquery.storage.v1beta2.AvroRows) rows_); + rows_ = subBuilder.buildPartial(); + } + rowsCase_ = 3; + break; + } + case 34: + { + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.Builder subBuilder = null; + if (rowsCase_ == 4) { + subBuilder = + ((com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) rows_) + .toBuilder(); + } + rows_ = + input.readMessage( + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom( + (com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) rows_); + rows_ = subBuilder.buildPartial(); + } + rowsCase_ = 4; + break; + } + case 42: + { + com.google.cloud.bigquery.storage.v1beta2.ThrottleState.Builder subBuilder = null; + if (throttleState_ != null) { + subBuilder = throttleState_.toBuilder(); + } + throttleState_ = + input.readMessage( + com.google.cloud.bigquery.storage.v1beta2.ThrottleState.parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(throttleState_); + throttleState_ = subBuilder.buildPartial(); + } + + break; + } + case 48: + { + rowCount_ = input.readInt64(); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse.class, + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse.Builder.class); + } + + private int rowsCase_ = 0; + private java.lang.Object rows_; + + public enum RowsCase implements com.google.protobuf.Internal.EnumLite { + AVRO_ROWS(3), + ARROW_RECORD_BATCH(4), + ROWS_NOT_SET(0); + private final int value; + + private RowsCase(int value) { + this.value = value; + } + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static RowsCase valueOf(int value) { + return forNumber(value); + } + + public static RowsCase forNumber(int value) { + switch (value) { + case 3: + return AVRO_ROWS; + case 4: + return ARROW_RECORD_BATCH; + case 0: + return ROWS_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public RowsCase getRowsCase() { + return RowsCase.forNumber(rowsCase_); + } + + public static final int AVRO_ROWS_FIELD_NUMBER = 3; + /** + * + * + *
+   * Serialized row data in AVRO format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + * + * @return Whether the avroRows field is set. + */ + public boolean hasAvroRows() { + return rowsCase_ == 3; + } + /** + * + * + *
+   * Serialized row data in AVRO format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + * + * @return The avroRows. + */ + public com.google.cloud.bigquery.storage.v1beta2.AvroRows getAvroRows() { + if (rowsCase_ == 3) { + return (com.google.cloud.bigquery.storage.v1beta2.AvroRows) rows_; + } + return com.google.cloud.bigquery.storage.v1beta2.AvroRows.getDefaultInstance(); + } + /** + * + * + *
+   * Serialized row data in AVRO format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + */ + public com.google.cloud.bigquery.storage.v1beta2.AvroRowsOrBuilder getAvroRowsOrBuilder() { + if (rowsCase_ == 3) { + return (com.google.cloud.bigquery.storage.v1beta2.AvroRows) rows_; + } + return com.google.cloud.bigquery.storage.v1beta2.AvroRows.getDefaultInstance(); + } + + public static final int ARROW_RECORD_BATCH_FIELD_NUMBER = 4; + /** + * + * + *
+   * Serialized row data in Arrow RecordBatch format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + * + * @return Whether the arrowRecordBatch field is set. + */ + public boolean hasArrowRecordBatch() { + return rowsCase_ == 4; + } + /** + * + * + *
+   * Serialized row data in Arrow RecordBatch format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + * + * @return The arrowRecordBatch. + */ + public com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch getArrowRecordBatch() { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) rows_; + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.getDefaultInstance(); + } + /** + * + * + *
+   * Serialized row data in Arrow RecordBatch format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + */ + public com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatchOrBuilder + getArrowRecordBatchOrBuilder() { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) rows_; + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.getDefaultInstance(); + } + + public static final int ROW_COUNT_FIELD_NUMBER = 6; + private long rowCount_; + /** + * + * + *
+   * Number of serialized rows in the rows block.
+   * 
+ * + * int64 row_count = 6; + * + * @return The rowCount. + */ + public long getRowCount() { + return rowCount_; + } + + public static final int STATS_FIELD_NUMBER = 2; + private com.google.cloud.bigquery.storage.v1beta2.StreamStats stats_; + /** + * + * + *
+   * Statistics for the stream.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + * + * @return Whether the stats field is set. + */ + public boolean hasStats() { + return stats_ != null; + } + /** + * + * + *
+   * Statistics for the stream.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + * + * @return The stats. + */ + public com.google.cloud.bigquery.storage.v1beta2.StreamStats getStats() { + return stats_ == null + ? com.google.cloud.bigquery.storage.v1beta2.StreamStats.getDefaultInstance() + : stats_; + } + /** + * + * + *
+   * Statistics for the stream.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + */ + public com.google.cloud.bigquery.storage.v1beta2.StreamStatsOrBuilder getStatsOrBuilder() { + return getStats(); + } + + public static final int THROTTLE_STATE_FIELD_NUMBER = 5; + private com.google.cloud.bigquery.storage.v1beta2.ThrottleState throttleState_; + /** + * + * + *
+   * Throttling state. If unset, the latest response still describes
+   * the current throttling status.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + * + * @return Whether the throttleState field is set. + */ + public boolean hasThrottleState() { + return throttleState_ != null; + } + /** + * + * + *
+   * Throttling state. If unset, the latest response still describes
+   * the current throttling status.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + * + * @return The throttleState. + */ + public com.google.cloud.bigquery.storage.v1beta2.ThrottleState getThrottleState() { + return throttleState_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ThrottleState.getDefaultInstance() + : throttleState_; + } + /** + * + * + *
+   * Throttling state. If unset, the latest response still describes
+   * the current throttling status.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + */ + public com.google.cloud.bigquery.storage.v1beta2.ThrottleStateOrBuilder + getThrottleStateOrBuilder() { + return getThrottleState(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (stats_ != null) { + output.writeMessage(2, getStats()); + } + if (rowsCase_ == 3) { + output.writeMessage(3, (com.google.cloud.bigquery.storage.v1beta2.AvroRows) rows_); + } + if (rowsCase_ == 4) { + output.writeMessage(4, (com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) rows_); + } + if (throttleState_ != null) { + output.writeMessage(5, getThrottleState()); + } + if (rowCount_ != 0L) { + output.writeInt64(6, rowCount_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (stats_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getStats()); + } + if (rowsCase_ == 3) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 3, (com.google.cloud.bigquery.storage.v1beta2.AvroRows) rows_); + } + if (rowsCase_ == 4) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, (com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) rows_); + } + if (throttleState_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getThrottleState()); + } + if (rowCount_ != 0L) { + size += com.google.protobuf.CodedOutputStream.computeInt64Size(6, rowCount_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse other = + (com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse) obj; + + if (getRowCount() != other.getRowCount()) return false; + if (hasStats() != other.hasStats()) return false; + if (hasStats()) { + if (!getStats().equals(other.getStats())) return false; + } + if (hasThrottleState() != other.hasThrottleState()) return false; + if (hasThrottleState()) { + if (!getThrottleState().equals(other.getThrottleState())) return false; + } + if (!getRowsCase().equals(other.getRowsCase())) return false; + switch (rowsCase_) { + case 3: + if (!getAvroRows().equals(other.getAvroRows())) return false; + break; + case 4: + if (!getArrowRecordBatch().equals(other.getArrowRecordBatch())) return false; + break; + case 0: + default: + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + ROW_COUNT_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getRowCount()); + if (hasStats()) { + hash = (37 * hash) + STATS_FIELD_NUMBER; + hash = (53 * hash) + getStats().hashCode(); + } + 
if (hasThrottleState()) { + hash = (37 * hash) + THROTTLE_STATE_FIELD_NUMBER; + hash = (53 * hash) + getThrottleState().hashCode(); + } + switch (rowsCase_) { + case 3: + hash = (37 * hash) + AVRO_ROWS_FIELD_NUMBER; + hash = (53 * hash) + getAvroRows().hashCode(); + break; + case 4: + hash = (37 * hash) + ARROW_RECORD_BATCH_FIELD_NUMBER; + hash = (53 * hash) + getArrowRecordBatch().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse parseFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Response from calling `ReadRows` may include row data, progress and
+   * throttling information.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ReadRowsResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.ReadRowsResponse) + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse.class, + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + rowCount_ = 0L; + + if (statsBuilder_ == null) { + stats_ = null; + } else { + stats_ = null; + statsBuilder_ = null; + } + if (throttleStateBuilder_ == null) { + throttleState_ = null; + } else { + throttleState_ = null; + throttleStateBuilder_ = null; + } + rowsCase_ = 0; + rows_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse build() { + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse result = + new com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse(this); + if (rowsCase_ == 3) { + if (avroRowsBuilder_ == null) { + result.rows_ = rows_; + } else { + result.rows_ = avroRowsBuilder_.build(); + } + } + if (rowsCase_ == 4) { + if (arrowRecordBatchBuilder_ == null) { + result.rows_ = rows_; + } else { + result.rows_ = arrowRecordBatchBuilder_.build(); + } + } + result.rowCount_ = rowCount_; + if (statsBuilder_ == null) { + result.stats_ = stats_; + } else { + result.stats_ = statsBuilder_.build(); + } + if (throttleStateBuilder_ == null) { + result.throttleState_ = throttleState_; + } else { + result.throttleState_ = throttleStateBuilder_.build(); + } + result.rowsCase_ = rowsCase_; + onBuilt(); + 
return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse other) { + if (other == com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse.getDefaultInstance()) + return this; + if (other.getRowCount() != 0L) { + setRowCount(other.getRowCount()); + } + if (other.hasStats()) { + mergeStats(other.getStats()); + } + if (other.hasThrottleState()) { + mergeThrottleState(other.getThrottleState()); + } + switch (other.getRowsCase()) { + case AVRO_ROWS: + { + mergeAvroRows(other.getAvroRows()); + break; + } + case ARROW_RECORD_BATCH: + { + mergeArrowRecordBatch(other.getArrowRecordBatch()); + break; + } + case ROWS_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int rowsCase_ = 0; + private java.lang.Object rows_; + + public RowsCase getRowsCase() { + return RowsCase.forNumber(rowsCase_); + } + + public Builder clearRows() { + rowsCase_ = 0; + rows_ = null; + onChanged(); + return this; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.AvroRows, + com.google.cloud.bigquery.storage.v1beta2.AvroRows.Builder, + com.google.cloud.bigquery.storage.v1beta2.AvroRowsOrBuilder> + avroRowsBuilder_; + /** + * + * + *
+     * Serialized row data in AVRO format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + * + * @return Whether the avroRows field is set. + */ + public boolean hasAvroRows() { + return rowsCase_ == 3; + } + /** + * + * + *
+     * Serialized row data in AVRO format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + * + * @return The avroRows. + */ + public com.google.cloud.bigquery.storage.v1beta2.AvroRows getAvroRows() { + if (avroRowsBuilder_ == null) { + if (rowsCase_ == 3) { + return (com.google.cloud.bigquery.storage.v1beta2.AvroRows) rows_; + } + return com.google.cloud.bigquery.storage.v1beta2.AvroRows.getDefaultInstance(); + } else { + if (rowsCase_ == 3) { + return avroRowsBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1beta2.AvroRows.getDefaultInstance(); + } + } + /** + * + * + *
+     * Serialized row data in AVRO format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + */ + public Builder setAvroRows(com.google.cloud.bigquery.storage.v1beta2.AvroRows value) { + if (avroRowsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + rows_ = value; + onChanged(); + } else { + avroRowsBuilder_.setMessage(value); + } + rowsCase_ = 3; + return this; + } + /** + * + * + *
+     * Serialized row data in AVRO format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + */ + public Builder setAvroRows( + com.google.cloud.bigquery.storage.v1beta2.AvroRows.Builder builderForValue) { + if (avroRowsBuilder_ == null) { + rows_ = builderForValue.build(); + onChanged(); + } else { + avroRowsBuilder_.setMessage(builderForValue.build()); + } + rowsCase_ = 3; + return this; + } + /** + * + * + *
+     * Serialized row data in AVRO format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + */ + public Builder mergeAvroRows(com.google.cloud.bigquery.storage.v1beta2.AvroRows value) { + if (avroRowsBuilder_ == null) { + if (rowsCase_ == 3 + && rows_ != com.google.cloud.bigquery.storage.v1beta2.AvroRows.getDefaultInstance()) { + rows_ = + com.google.cloud.bigquery.storage.v1beta2.AvroRows.newBuilder( + (com.google.cloud.bigquery.storage.v1beta2.AvroRows) rows_) + .mergeFrom(value) + .buildPartial(); + } else { + rows_ = value; + } + onChanged(); + } else { + if (rowsCase_ == 3) { + avroRowsBuilder_.mergeFrom(value); + } + avroRowsBuilder_.setMessage(value); + } + rowsCase_ = 3; + return this; + } + /** + * + * + *
+     * Serialized row data in AVRO format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + */ + public Builder clearAvroRows() { + if (avroRowsBuilder_ == null) { + if (rowsCase_ == 3) { + rowsCase_ = 0; + rows_ = null; + onChanged(); + } + } else { + if (rowsCase_ == 3) { + rowsCase_ = 0; + rows_ = null; + } + avroRowsBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Serialized row data in AVRO format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + */ + public com.google.cloud.bigquery.storage.v1beta2.AvroRows.Builder getAvroRowsBuilder() { + return getAvroRowsFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Serialized row data in AVRO format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + */ + public com.google.cloud.bigquery.storage.v1beta2.AvroRowsOrBuilder getAvroRowsOrBuilder() { + if ((rowsCase_ == 3) && (avroRowsBuilder_ != null)) { + return avroRowsBuilder_.getMessageOrBuilder(); + } else { + if (rowsCase_ == 3) { + return (com.google.cloud.bigquery.storage.v1beta2.AvroRows) rows_; + } + return com.google.cloud.bigquery.storage.v1beta2.AvroRows.getDefaultInstance(); + } + } + /** + * + * + *
+     * Serialized row data in AVRO format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.AvroRows, + com.google.cloud.bigquery.storage.v1beta2.AvroRows.Builder, + com.google.cloud.bigquery.storage.v1beta2.AvroRowsOrBuilder> + getAvroRowsFieldBuilder() { + if (avroRowsBuilder_ == null) { + if (!(rowsCase_ == 3)) { + rows_ = com.google.cloud.bigquery.storage.v1beta2.AvroRows.getDefaultInstance(); + } + avroRowsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.AvroRows, + com.google.cloud.bigquery.storage.v1beta2.AvroRows.Builder, + com.google.cloud.bigquery.storage.v1beta2.AvroRowsOrBuilder>( + (com.google.cloud.bigquery.storage.v1beta2.AvroRows) rows_, + getParentForChildren(), + isClean()); + rows_ = null; + } + rowsCase_ = 3; + onChanged(); + ; + return avroRowsBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch, + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.Builder, + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatchOrBuilder> + arrowRecordBatchBuilder_; + /** + * + * + *
+     * Serialized row data in Arrow RecordBatch format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + * + * @return Whether the arrowRecordBatch field is set. + */ + public boolean hasArrowRecordBatch() { + return rowsCase_ == 4; + } + /** + * + * + *
+     * Serialized row data in Arrow RecordBatch format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + * + * @return The arrowRecordBatch. + */ + public com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch getArrowRecordBatch() { + if (arrowRecordBatchBuilder_ == null) { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) rows_; + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.getDefaultInstance(); + } else { + if (rowsCase_ == 4) { + return arrowRecordBatchBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.getDefaultInstance(); + } + } + /** + * + * + *
+     * Serialized row data in Arrow RecordBatch format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + */ + public Builder setArrowRecordBatch( + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch value) { + if (arrowRecordBatchBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + rows_ = value; + onChanged(); + } else { + arrowRecordBatchBuilder_.setMessage(value); + } + rowsCase_ = 4; + return this; + } + /** + * + * + *
+     * Serialized row data in Arrow RecordBatch format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + */ + public Builder setArrowRecordBatch( + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.Builder builderForValue) { + if (arrowRecordBatchBuilder_ == null) { + rows_ = builderForValue.build(); + onChanged(); + } else { + arrowRecordBatchBuilder_.setMessage(builderForValue.build()); + } + rowsCase_ = 4; + return this; + } + /** + * + * + *
+     * Serialized row data in Arrow RecordBatch format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + */ + public Builder mergeArrowRecordBatch( + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch value) { + if (arrowRecordBatchBuilder_ == null) { + if (rowsCase_ == 4 + && rows_ + != com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch + .getDefaultInstance()) { + rows_ = + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.newBuilder( + (com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) rows_) + .mergeFrom(value) + .buildPartial(); + } else { + rows_ = value; + } + onChanged(); + } else { + if (rowsCase_ == 4) { + arrowRecordBatchBuilder_.mergeFrom(value); + } + arrowRecordBatchBuilder_.setMessage(value); + } + rowsCase_ = 4; + return this; + } + /** + * + * + *
+     * Serialized row data in Arrow RecordBatch format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + */ + public Builder clearArrowRecordBatch() { + if (arrowRecordBatchBuilder_ == null) { + if (rowsCase_ == 4) { + rowsCase_ = 0; + rows_ = null; + onChanged(); + } + } else { + if (rowsCase_ == 4) { + rowsCase_ = 0; + rows_ = null; + } + arrowRecordBatchBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Serialized row data in Arrow RecordBatch format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + */ + public com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.Builder + getArrowRecordBatchBuilder() { + return getArrowRecordBatchFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Serialized row data in Arrow RecordBatch format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + */ + public com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatchOrBuilder + getArrowRecordBatchOrBuilder() { + if ((rowsCase_ == 4) && (arrowRecordBatchBuilder_ != null)) { + return arrowRecordBatchBuilder_.getMessageOrBuilder(); + } else { + if (rowsCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) rows_; + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.getDefaultInstance(); + } + } + /** + * + * + *
+     * Serialized row data in Arrow RecordBatch format.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch, + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.Builder, + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatchOrBuilder> + getArrowRecordBatchFieldBuilder() { + if (arrowRecordBatchBuilder_ == null) { + if (!(rowsCase_ == 4)) { + rows_ = com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.getDefaultInstance(); + } + arrowRecordBatchBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch, + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch.Builder, + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatchOrBuilder>( + (com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch) rows_, + getParentForChildren(), + isClean()); + rows_ = null; + } + rowsCase_ = 4; + onChanged(); + ; + return arrowRecordBatchBuilder_; + } + + private long rowCount_; + /** + * + * + *
+     * Number of serialized rows in the rows block.
+     * 
+ * + * int64 row_count = 6; + * + * @return The rowCount. + */ + public long getRowCount() { + return rowCount_; + } + /** + * + * + *
+     * Number of serialized rows in the rows block.
+     * 
+ * + * int64 row_count = 6; + * + * @param value The rowCount to set. + * @return This builder for chaining. + */ + public Builder setRowCount(long value) { + + rowCount_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Number of serialized rows in the rows block.
+     * 
+ * + * int64 row_count = 6; + * + * @return This builder for chaining. + */ + public Builder clearRowCount() { + + rowCount_ = 0L; + onChanged(); + return this; + } + + private com.google.cloud.bigquery.storage.v1beta2.StreamStats stats_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.StreamStats, + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Builder, + com.google.cloud.bigquery.storage.v1beta2.StreamStatsOrBuilder> + statsBuilder_; + /** + * + * + *
+     * Statistics for the stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + * + * @return Whether the stats field is set. + */ + public boolean hasStats() { + return statsBuilder_ != null || stats_ != null; + } + /** + * + * + *
+     * Statistics for the stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + * + * @return The stats. + */ + public com.google.cloud.bigquery.storage.v1beta2.StreamStats getStats() { + if (statsBuilder_ == null) { + return stats_ == null + ? com.google.cloud.bigquery.storage.v1beta2.StreamStats.getDefaultInstance() + : stats_; + } else { + return statsBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Statistics for the stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + */ + public Builder setStats(com.google.cloud.bigquery.storage.v1beta2.StreamStats value) { + if (statsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + stats_ = value; + onChanged(); + } else { + statsBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Statistics for the stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + */ + public Builder setStats( + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Builder builderForValue) { + if (statsBuilder_ == null) { + stats_ = builderForValue.build(); + onChanged(); + } else { + statsBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Statistics for the stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + */ + public Builder mergeStats(com.google.cloud.bigquery.storage.v1beta2.StreamStats value) { + if (statsBuilder_ == null) { + if (stats_ != null) { + stats_ = + com.google.cloud.bigquery.storage.v1beta2.StreamStats.newBuilder(stats_) + .mergeFrom(value) + .buildPartial(); + } else { + stats_ = value; + } + onChanged(); + } else { + statsBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Statistics for the stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + */ + public Builder clearStats() { + if (statsBuilder_ == null) { + stats_ = null; + onChanged(); + } else { + stats_ = null; + statsBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Statistics for the stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + */ + public com.google.cloud.bigquery.storage.v1beta2.StreamStats.Builder getStatsBuilder() { + + onChanged(); + return getStatsFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Statistics for the stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + */ + public com.google.cloud.bigquery.storage.v1beta2.StreamStatsOrBuilder getStatsOrBuilder() { + if (statsBuilder_ != null) { + return statsBuilder_.getMessageOrBuilder(); + } else { + return stats_ == null + ? com.google.cloud.bigquery.storage.v1beta2.StreamStats.getDefaultInstance() + : stats_; + } + } + /** + * + * + *
+     * Statistics for the stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.StreamStats, + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Builder, + com.google.cloud.bigquery.storage.v1beta2.StreamStatsOrBuilder> + getStatsFieldBuilder() { + if (statsBuilder_ == null) { + statsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.StreamStats, + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Builder, + com.google.cloud.bigquery.storage.v1beta2.StreamStatsOrBuilder>( + getStats(), getParentForChildren(), isClean()); + stats_ = null; + } + return statsBuilder_; + } + + private com.google.cloud.bigquery.storage.v1beta2.ThrottleState throttleState_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ThrottleState, + com.google.cloud.bigquery.storage.v1beta2.ThrottleState.Builder, + com.google.cloud.bigquery.storage.v1beta2.ThrottleStateOrBuilder> + throttleStateBuilder_; + /** + * + * + *
+     * Throttling state. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + * + * @return Whether the throttleState field is set. + */ + public boolean hasThrottleState() { + return throttleStateBuilder_ != null || throttleState_ != null; + } + /** + * + * + *
+     * Throttling state. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + * + * @return The throttleState. + */ + public com.google.cloud.bigquery.storage.v1beta2.ThrottleState getThrottleState() { + if (throttleStateBuilder_ == null) { + return throttleState_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ThrottleState.getDefaultInstance() + : throttleState_; + } else { + return throttleStateBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Throttling state. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + */ + public Builder setThrottleState(com.google.cloud.bigquery.storage.v1beta2.ThrottleState value) { + if (throttleStateBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + throttleState_ = value; + onChanged(); + } else { + throttleStateBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Throttling state. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + */ + public Builder setThrottleState( + com.google.cloud.bigquery.storage.v1beta2.ThrottleState.Builder builderForValue) { + if (throttleStateBuilder_ == null) { + throttleState_ = builderForValue.build(); + onChanged(); + } else { + throttleStateBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Throttling state. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + */ + public Builder mergeThrottleState( + com.google.cloud.bigquery.storage.v1beta2.ThrottleState value) { + if (throttleStateBuilder_ == null) { + if (throttleState_ != null) { + throttleState_ = + com.google.cloud.bigquery.storage.v1beta2.ThrottleState.newBuilder(throttleState_) + .mergeFrom(value) + .buildPartial(); + } else { + throttleState_ = value; + } + onChanged(); + } else { + throttleStateBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Throttling state. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + */ + public Builder clearThrottleState() { + if (throttleStateBuilder_ == null) { + throttleState_ = null; + onChanged(); + } else { + throttleState_ = null; + throttleStateBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Throttling state. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + */ + public com.google.cloud.bigquery.storage.v1beta2.ThrottleState.Builder + getThrottleStateBuilder() { + + onChanged(); + return getThrottleStateFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Throttling state. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + */ + public com.google.cloud.bigquery.storage.v1beta2.ThrottleStateOrBuilder + getThrottleStateOrBuilder() { + if (throttleStateBuilder_ != null) { + return throttleStateBuilder_.getMessageOrBuilder(); + } else { + return throttleState_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ThrottleState.getDefaultInstance() + : throttleState_; + } + } + /** + * + * + *
+     * Throttling state. If unset, the latest response still describes
+     * the current throttling status.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ThrottleState, + com.google.cloud.bigquery.storage.v1beta2.ThrottleState.Builder, + com.google.cloud.bigquery.storage.v1beta2.ThrottleStateOrBuilder> + getThrottleStateFieldBuilder() { + if (throttleStateBuilder_ == null) { + throttleStateBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ThrottleState, + com.google.cloud.bigquery.storage.v1beta2.ThrottleState.Builder, + com.google.cloud.bigquery.storage.v1beta2.ThrottleStateOrBuilder>( + getThrottleState(), getParentForChildren(), isClean()); + throttleState_ = null; + } + return throttleStateBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.ReadRowsResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.ReadRowsResponse) + private static final com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReadRowsResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ReadRowsResponse(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponseOrBuilder.java new file mode 100644 index 0000000000..6a451072e5 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponseOrBuilder.java @@ -0,0 +1,184 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +public interface ReadRowsResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.ReadRowsResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Serialized row data in AVRO format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + * + * @return Whether the avroRows field is set. + */ + boolean hasAvroRows(); + /** + * + * + *
+   * Serialized row data in AVRO format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + * + * @return The avroRows. + */ + com.google.cloud.bigquery.storage.v1beta2.AvroRows getAvroRows(); + /** + * + * + *
+   * Serialized row data in AVRO format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.AvroRows avro_rows = 3; + */ + com.google.cloud.bigquery.storage.v1beta2.AvroRowsOrBuilder getAvroRowsOrBuilder(); + + /** + * + * + *
+   * Serialized row data in Arrow RecordBatch format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + * + * @return Whether the arrowRecordBatch field is set. + */ + boolean hasArrowRecordBatch(); + /** + * + * + *
+   * Serialized row data in Arrow RecordBatch format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + * + * @return The arrowRecordBatch. + */ + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch getArrowRecordBatch(); + /** + * + * + *
+   * Serialized row data in Arrow RecordBatch format.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ArrowRecordBatch arrow_record_batch = 4; + */ + com.google.cloud.bigquery.storage.v1beta2.ArrowRecordBatchOrBuilder + getArrowRecordBatchOrBuilder(); + + /** + * + * + *
+   * Number of serialized rows in the rows block.
+   * 
+ * + * int64 row_count = 6; + * + * @return The rowCount. + */ + long getRowCount(); + + /** + * + * + *
+   * Statistics for the stream.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + * + * @return Whether the stats field is set. + */ + boolean hasStats(); + /** + * + * + *
+   * Statistics for the stream.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + * + * @return The stats. + */ + com.google.cloud.bigquery.storage.v1beta2.StreamStats getStats(); + /** + * + * + *
+   * Statistics for the stream.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats stats = 2; + */ + com.google.cloud.bigquery.storage.v1beta2.StreamStatsOrBuilder getStatsOrBuilder(); + + /** + * + * + *
+   * Throttling state. If unset, the latest response still describes
+   * the current throttling status.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + * + * @return Whether the throttleState field is set. + */ + boolean hasThrottleState(); + /** + * + * + *
+   * Throttling state. If unset, the latest response still describes
+   * the current throttling status.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + * + * @return The throttleState. + */ + com.google.cloud.bigquery.storage.v1beta2.ThrottleState getThrottleState(); + /** + * + * + *
+   * Throttling state. If unset, the latest response still describes
+   * the current throttling status.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ThrottleState throttle_state = 5; + */ + com.google.cloud.bigquery.storage.v1beta2.ThrottleStateOrBuilder getThrottleStateOrBuilder(); + + public com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse.RowsCase getRowsCase(); +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadSession.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadSession.java new file mode 100644 index 0000000000..dc38c2148e --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadSession.java @@ -0,0 +1,5203 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/stream.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Information about the ReadSession.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ReadSession} + */ +public final class ReadSession extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.ReadSession) + ReadSessionOrBuilder { + private static final long serialVersionUID = 0L; + // Use ReadSession.newBuilder() to construct. + private ReadSession(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ReadSession() { + name_ = ""; + dataFormat_ = 0; + table_ = ""; + streams_ = java.util.Collections.emptyList(); + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ReadSession(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private ReadSession( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + name_ = s; + break; + } + case 18: + { + com.google.protobuf.Timestamp.Builder subBuilder = null; + if (expireTime_ != null) { + subBuilder = expireTime_.toBuilder(); + } + expireTime_ = + input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(expireTime_); + expireTime_ = subBuilder.buildPartial(); + } + + break; + } + case 24: + { + int rawValue = input.readEnum(); + + dataFormat_ = rawValue; + break; + } + case 34: + { + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.Builder subBuilder = null; + if (schemaCase_ == 4) { + subBuilder = + ((com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_).toBuilder(); + } + schema_ = + input.readMessage( + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom( + (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_); + schema_ = subBuilder.buildPartial(); + } + schemaCase_ = 4; + break; + } + case 42: + { + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.Builder subBuilder = null; + if (schemaCase_ == 5) { + subBuilder = + ((com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_).toBuilder(); + } + schema_ = + input.readMessage( + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom( + (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_); + schema_ = subBuilder.buildPartial(); + } + schemaCase_ = 5; + break; + } + case 50: + { + java.lang.String s = input.readStringRequireUtf8(); + + table_ = s; + break; + } + case 58: + { + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers.Builder + subBuilder = null; + if (tableModifiers_ != null) { + subBuilder = tableModifiers_.toBuilder(); + } + tableModifiers_ = + input.readMessage( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers.parser(), + 
extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(tableModifiers_); + tableModifiers_ = subBuilder.buildPartial(); + } + + break; + } + case 66: + { + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions.Builder + subBuilder = null; + if (readOptions_ != null) { + subBuilder = readOptions_.toBuilder(); + } + readOptions_ = + input.readMessage( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions + .parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(readOptions_); + readOptions_ = subBuilder.buildPartial(); + } + + break; + } + case 82: + { + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + streams_ = + new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + streams_.add( + input.readMessage( + com.google.cloud.bigquery.storage.v1beta2.ReadStream.parser(), + extensionRegistry)); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + streams_ = java.util.Collections.unmodifiableList(streams_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.class, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.Builder.class); + } + + public interface TableModifiersOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * The snapshot time of the table. If not set, interpreted as now.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + * + * @return Whether the snapshotTime field is set. + */ + boolean hasSnapshotTime(); + /** + * + * + *
+     * The snapshot time of the table. If not set, interpreted as now.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + * + * @return The snapshotTime. + */ + com.google.protobuf.Timestamp getSnapshotTime(); + /** + * + * + *
+     * The snapshot time of the table. If not set, interpreted as now.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + com.google.protobuf.TimestampOrBuilder getSnapshotTimeOrBuilder(); + } + /** + * + * + *
+   * Additional attributes when reading a table.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers} + */ + public static final class TableModifiers extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers) + TableModifiersOrBuilder { + private static final long serialVersionUID = 0L; + // Use TableModifiers.newBuilder() to construct. + private TableModifiers(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private TableModifiers() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new TableModifiers(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private TableModifiers( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.protobuf.Timestamp.Builder subBuilder = null; + if (snapshotTime_ != null) { + subBuilder = snapshotTime_.toBuilder(); + } + snapshotTime_ = + input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(snapshotTime_); + snapshotTime_ = subBuilder.buildPartial(); + } + + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableModifiers_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableModifiers_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers.class, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers.Builder.class); + } + + public static final int SNAPSHOT_TIME_FIELD_NUMBER = 1; + private com.google.protobuf.Timestamp snapshotTime_; + /** + * + * + *
+     * The snapshot time of the table. If not set, interpreted as now.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + * + * @return Whether the snapshotTime field is set. + */ + public boolean hasSnapshotTime() { + return snapshotTime_ != null; + } + /** + * + * + *
+     * The snapshot time of the table. If not set, interpreted as now.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + * + * @return The snapshotTime. + */ + public com.google.protobuf.Timestamp getSnapshotTime() { + return snapshotTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : snapshotTime_; + } + /** + * + * + *
+     * The snapshot time of the table. If not set, interpreted as now.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + public com.google.protobuf.TimestampOrBuilder getSnapshotTimeOrBuilder() { + return getSnapshotTime(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (snapshotTime_ != null) { + output.writeMessage(1, getSnapshotTime()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (snapshotTime_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getSnapshotTime()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers other = + (com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers) obj; + + if (hasSnapshotTime() != other.hasSnapshotTime()) return false; + if (hasSnapshotTime()) { + if (!getSnapshotTime().equals(other.getSnapshotTime())) return false; + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasSnapshotTime()) { + hash = (37 * hash) + SNAPSHOT_TIME_FIELD_NUMBER; + hash = (53 * hash) + getSnapshotTime().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+     * Additional attributes when reading a table.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers) + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiersOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableModifiers_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableModifiers_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers.class, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + if (snapshotTimeBuilder_ == null) { + snapshotTime_ = null; + } else { + snapshotTime_ = null; + snapshotTimeBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableModifiers_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers build() { + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers result = + new com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers(this); + if (snapshotTimeBuilder_ == null) { + result.snapshotTime_ = snapshotTime_; + } else { + result.snapshotTime_ = snapshotTimeBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder 
clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers other) { + if (other + == com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers + .getDefaultInstance()) return this; + if (other.hasSnapshotTime()) { + mergeSnapshotTime(other.getSnapshotTime()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private com.google.protobuf.Timestamp snapshotTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + snapshotTimeBuilder_; + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + * + * @return Whether the snapshotTime field is set. + */ + public boolean hasSnapshotTime() { + return snapshotTimeBuilder_ != null || snapshotTime_ != null; + } + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + * + * @return The snapshotTime. + */ + public com.google.protobuf.Timestamp getSnapshotTime() { + if (snapshotTimeBuilder_ == null) { + return snapshotTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : snapshotTime_; + } else { + return snapshotTimeBuilder_.getMessage(); + } + } + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + public Builder setSnapshotTime(com.google.protobuf.Timestamp value) { + if (snapshotTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + snapshotTime_ = value; + onChanged(); + } else { + snapshotTimeBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + public Builder setSnapshotTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (snapshotTimeBuilder_ == null) { + snapshotTime_ = builderForValue.build(); + onChanged(); + } else { + snapshotTimeBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + public Builder mergeSnapshotTime(com.google.protobuf.Timestamp value) { + if (snapshotTimeBuilder_ == null) { + if (snapshotTime_ != null) { + snapshotTime_ = + com.google.protobuf.Timestamp.newBuilder(snapshotTime_) + .mergeFrom(value) + .buildPartial(); + } else { + snapshotTime_ = value; + } + onChanged(); + } else { + snapshotTimeBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + public Builder clearSnapshotTime() { + if (snapshotTimeBuilder_ == null) { + snapshotTime_ = null; + onChanged(); + } else { + snapshotTime_ = null; + snapshotTimeBuilder_ = null; + } + + return this; + } + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + public com.google.protobuf.Timestamp.Builder getSnapshotTimeBuilder() { + + onChanged(); + return getSnapshotTimeFieldBuilder().getBuilder(); + } + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + public com.google.protobuf.TimestampOrBuilder getSnapshotTimeOrBuilder() { + if (snapshotTimeBuilder_ != null) { + return snapshotTimeBuilder_.getMessageOrBuilder(); + } else { + return snapshotTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : snapshotTime_; + } + } + /** + * + * + *
+       * The snapshot time of the table. If not set, interpreted as now.
+       * 
+ * + * .google.protobuf.Timestamp snapshot_time = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getSnapshotTimeFieldBuilder() { + if (snapshotTimeBuilder_ == null) { + snapshotTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getSnapshotTime(), getParentForChildren(), isClean()); + snapshotTime_ = null; + } + return snapshotTimeBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers) + private static final com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public TableModifiers parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TableModifiers(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public interface TableReadOptionsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * Names of the fields in the table that should be read. If empty, all
+     * fields will be read. If the specified field is a nested field, all
+     * the sub-fields in the field will be selected. The output field order is
+     * unrelated to the order of fields in selected_fields.
+     * 
+ * + * repeated string selected_fields = 1; + * + * @return A list containing the selectedFields. + */ + java.util.List getSelectedFieldsList(); + /** + * + * + *
+     * Names of the fields in the table that should be read. If empty, all
+     * fields will be read. If the specified field is a nested field, all
+     * the sub-fields in the field will be selected. The output field order is
+     * unrelated to the order of fields in selected_fields.
+     * 
+ * + * repeated string selected_fields = 1; + * + * @return The count of selectedFields. + */ + int getSelectedFieldsCount(); + /** + * + * + *
+     * Names of the fields in the table that should be read. If empty, all
+     * fields will be read. If the specified field is a nested field, all
+     * the sub-fields in the field will be selected. The output field order is
+     * unrelated to the order of fields in selected_fields.
+     * 
+ * + * repeated string selected_fields = 1; + * + * @param index The index of the element to return. + * @return The selectedFields at the given index. + */ + java.lang.String getSelectedFields(int index); + /** + * + * + *
+     * Names of the fields in the table that should be read. If empty, all
+     * fields will be read. If the specified field is a nested field, all
+     * the sub-fields in the field will be selected. The output field order is
+     * unrelated to the order of fields in selected_fields.
+     * 
+ * + * repeated string selected_fields = 1; + * + * @param index The index of the value to return. + * @return The bytes of the selectedFields at the given index. + */ + com.google.protobuf.ByteString getSelectedFieldsBytes(int index); + + /** + * + * + *
+     * SQL text filtering statement, similar to a WHERE clause in a query.
+     * Currently, only a single predicate that is a comparison between a column
+     * and a constant value is supported. Aggregates are not supported.
+     * Examples: "int_field > 5"
+     *           "date_field = CAST('2014-9-27' as DATE)"
+     *           "nullable_field is not NULL"
+     *           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+     *           "numeric_field BETWEEN 1.0 AND 5.0"
+     * 
+ * + * string row_restriction = 2; + * + * @return The rowRestriction. + */ + java.lang.String getRowRestriction(); + /** + * + * + *
+     * SQL text filtering statement, similar to a WHERE clause in a query.
+     * Currently, only a single predicate that is a comparison between a column
+     * and a constant value is supported. Aggregates are not supported.
+     * Examples: "int_field > 5"
+     *           "date_field = CAST('2014-9-27' as DATE)"
+     *           "nullable_field is not NULL"
+     *           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+     *           "numeric_field BETWEEN 1.0 AND 5.0"
+     * 
+ * + * string row_restriction = 2; + * + * @return The bytes for rowRestriction. + */ + com.google.protobuf.ByteString getRowRestrictionBytes(); + } + /** + * + * + *
+   * Options dictating how we read a table.
+   * 
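Editorial note, a hedged usage sketch (not part of the generated file): the column names and the predicate below are placeholders, and the restriction follows the single-predicate form documented for row_restriction; only builder methods generated in this class (addSelectedFields, setRowRestriction) are used.

    static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions
        exampleReadOptions() {
      return com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions.newBuilder()
          .addSelectedFields("order_id")     // hypothetical scalar column
          .addSelectedFields("customer")     // hypothetical record column; all its sub-fields are read
          .setRowRestriction("order_id > 5") // hypothetical single-predicate filter
          .build();
    }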
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions} + */ + public static final class TableReadOptions extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions) + TableReadOptionsOrBuilder { + private static final long serialVersionUID = 0L; + // Use TableReadOptions.newBuilder() to construct. + private TableReadOptions(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private TableReadOptions() { + selectedFields_ = com.google.protobuf.LazyStringArrayList.EMPTY; + rowRestriction_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new TableReadOptions(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private TableReadOptions( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + selectedFields_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + selectedFields_.add(s); + break; + } + case 18: + { + java.lang.String s = input.readStringRequireUtf8(); + + rowRestriction_ = s; + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + selectedFields_ = selectedFields_.getUnmodifiableView(); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableReadOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableReadOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions.class, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions.Builder.class); + } + + public static final int SELECTED_FIELDS_FIELD_NUMBER = 1; + private com.google.protobuf.LazyStringList selectedFields_; + /** + * + * + *
+     * Names of the fields in the table that should be read. If empty, all
+     * fields will be read. If the specified field is a nested field, all
+     * the sub-fields in the field will be selected. The output field order is
+     * unrelated to the order of fields in selected_fields.
+     * 
+ * + * repeated string selected_fields = 1; + * + * @return A list containing the selectedFields. + */ + public com.google.protobuf.ProtocolStringList getSelectedFieldsList() { + return selectedFields_; + } + /** + * + * + *
+     * Names of the fields in the table that should be read. If empty, all
+     * fields will be read. If the specified field is a nested field, all
+     * the sub-fields in the field will be selected. The output field order is
+     * unrelated to the order of fields in selected_fields.
+     * 
+ * + * repeated string selected_fields = 1; + * + * @return The count of selectedFields. + */ + public int getSelectedFieldsCount() { + return selectedFields_.size(); + } + /** + * + * + *
+     * Names of the fields in the table that should be read. If empty, all
+     * fields will be read. If the specified field is a nested field, all
+     * the sub-fields in the field will be selected. The output field order is
+     * unrelated to the order of fields in selected_fields.
+     * 
+ * + * repeated string selected_fields = 1; + * + * @param index The index of the element to return. + * @return The selectedFields at the given index. + */ + public java.lang.String getSelectedFields(int index) { + return selectedFields_.get(index); + } + /** + * + * + *
+     * Names of the fields in the table that should be read. If empty, all
+     * fields will be read. If the specified field is a nested field, all
+     * the sub-fields in the field will be selected. The output field order is
+     * unrelated to the order of fields in selected_fields.
+     * 
+ * + * repeated string selected_fields = 1; + * + * @param index The index of the value to return. + * @return The bytes of the selectedFields at the given index. + */ + public com.google.protobuf.ByteString getSelectedFieldsBytes(int index) { + return selectedFields_.getByteString(index); + } + + public static final int ROW_RESTRICTION_FIELD_NUMBER = 2; + private volatile java.lang.Object rowRestriction_; + /** + * + * + *
+     * SQL text filtering statement, similar to a WHERE clause in a query.
+     * Currently, only a single predicate that is a comparison between a column
+     * and a constant value is supported. Aggregates are not supported.
+     * Examples: "int_field > 5"
+     *           "date_field = CAST('2014-9-27' as DATE)"
+     *           "nullable_field is not NULL"
+     *           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+     *           "numeric_field BETWEEN 1.0 AND 5.0"
+     * 
+ * + * string row_restriction = 2; + * + * @return The rowRestriction. + */ + public java.lang.String getRowRestriction() { + java.lang.Object ref = rowRestriction_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + rowRestriction_ = s; + return s; + } + } + /** + * + * + *
+     * SQL text filtering statement, similar to a WHERE clause in a query.
+     * Currently, only a single predicate that is a comparison between a column
+     * and a constant value is supported. Aggregates are not supported.
+     * Examples: "int_field > 5"
+     *           "date_field = CAST('2014-9-27' as DATE)"
+     *           "nullable_field is not NULL"
+     *           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+     *           "numeric_field BETWEEN 1.0 AND 5.0"
+     * 
+ * + * string row_restriction = 2; + * + * @return The bytes for rowRestriction. + */ + public com.google.protobuf.ByteString getRowRestrictionBytes() { + java.lang.Object ref = rowRestriction_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + rowRestriction_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + for (int i = 0; i < selectedFields_.size(); i++) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, selectedFields_.getRaw(i)); + } + if (!getRowRestrictionBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, rowRestriction_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < selectedFields_.size(); i++) { + dataSize += computeStringSizeNoTag(selectedFields_.getRaw(i)); + } + size += dataSize; + size += 1 * getSelectedFieldsList().size(); + } + if (!getRowRestrictionBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, rowRestriction_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions other = + (com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions) obj; + + if (!getSelectedFieldsList().equals(other.getSelectedFieldsList())) return false; + if (!getRowRestriction().equals(other.getRowRestriction())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (getSelectedFieldsCount() > 0) { + hash = (37 * hash) + SELECTED_FIELDS_FIELD_NUMBER; + hash = (53 * hash) + getSelectedFieldsList().hashCode(); + } + hash = (37 * hash) + ROW_RESTRICTION_FIELD_NUMBER; + hash = (53 * hash) + getRowRestriction().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions 
parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+     * Options dictating how we read a table.
+     * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions) + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptionsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableReadOptions_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableReadOptions_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions.class, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions.Builder + .class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + selectedFields_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + rowRestriction_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableReadOptions_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions build() { + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions result = + new com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions(this); + int from_bitField0_ = bitField0_; + if (((bitField0_ & 0x00000001) != 0)) { + selectedFields_ = selectedFields_.getUnmodifiableView(); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.selectedFields_ = selectedFields_; + result.rowRestriction_ = rowRestriction_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public 
Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions) { + return mergeFrom( + (com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions other) { + if (other + == com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions + .getDefaultInstance()) return this; + if (!other.selectedFields_.isEmpty()) { + if (selectedFields_.isEmpty()) { + selectedFields_ = other.selectedFields_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureSelectedFieldsIsMutable(); + selectedFields_.addAll(other.selectedFields_); + } + onChanged(); + } + if (!other.getRowRestriction().isEmpty()) { + rowRestriction_ = other.rowRestriction_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int bitField0_; + + private com.google.protobuf.LazyStringList selectedFields_ = + com.google.protobuf.LazyStringArrayList.EMPTY; + + private void ensureSelectedFieldsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + selectedFields_ = new com.google.protobuf.LazyStringArrayList(selectedFields_); + bitField0_ |= 0x00000001; + } + } + /** + * + * + *
+       * Names of the fields in the table that should be read. If empty, all
+       * fields will be read. If the specified field is a nested field, all
+       * the sub-fields in the field will be selected. The output field order is
+       * unrelated to the order of fields in selected_fields.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @return A list containing the selectedFields. + */ + public com.google.protobuf.ProtocolStringList getSelectedFieldsList() { + return selectedFields_.getUnmodifiableView(); + } + /** + * + * + *
+       * Names of the fields in the table that should be read. If empty, all
+       * fields will be read. If the specified field is a nested field, all
+       * the sub-fields in the field will be selected. The output field order is
+       * unrelated to the order of fields in selected_fields.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @return The count of selectedFields. + */ + public int getSelectedFieldsCount() { + return selectedFields_.size(); + } + /** + * + * + *
+       * Names of the fields in the table that should be read. If empty, all
+       * fields will be read. If the specified field is a nested field, all
+       * the sub-fields in the field will be selected. The output field order is
+       * unrelated to the order of fields in selected_fields.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @param index The index of the element to return. + * @return The selectedFields at the given index. + */ + public java.lang.String getSelectedFields(int index) { + return selectedFields_.get(index); + } + /** + * + * + *
+       * Names of the fields in the table that should be read. If empty, all
+       * fields will be read. If the specified field is a nested field, all
+       * the sub-fields in the field will be selected. The output field order is
+       * unrelated to the order of fields in selected_fields.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @param index The index of the value to return. + * @return The bytes of the selectedFields at the given index. + */ + public com.google.protobuf.ByteString getSelectedFieldsBytes(int index) { + return selectedFields_.getByteString(index); + } + /** + * + * +
+       * Names of the fields in the table that should be read. If empty, all
+       * fields will be read. If the specified field is a nested field, all
+       * the sub-fields in the field will be selected. The output field order is
+       * unrelated to the order of fields in selected_fields.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @param index The index to set the value at. + * @param value The selectedFields to set. + * @return This builder for chaining. + */ + public Builder setSelectedFields(int index, java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSelectedFieldsIsMutable(); + selectedFields_.set(index, value); + onChanged(); + return this; + } + /** + * + * + *
+       * Names of the fields in the table that should be read. If empty, all
+       * fields will be read. If the specified field is a nested field, all
+       * the sub-fields in the field will be selected. The output field order is
+       * unrelated to the order of fields in selected_fields.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @param value The selectedFields to add. + * @return This builder for chaining. + */ + public Builder addSelectedFields(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + ensureSelectedFieldsIsMutable(); + selectedFields_.add(value); + onChanged(); + return this; + } + /** + * + * + *
+       * Names of the fields in the table that should be read. If empty, all
+       * fields will be read. If the specified field is a nested field, all
+       * the sub-fields in the field will be selected. The output field order is
+       * unrelated to the order of fields in selected_fields.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @param values The selectedFields to add. + * @return This builder for chaining. + */ + public Builder addAllSelectedFields(java.lang.Iterable values) { + ensureSelectedFieldsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, selectedFields_); + onChanged(); + return this; + } + /** + * + * + *
+       * Names of the fields in the table that should be read. If empty, all
+       * fields will be read. If the specified field is a nested field, all
+       * the sub-fields in the field will be selected. The output field order is
+       * unrelated to the order of fields in selected_fields.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @return This builder for chaining. + */ + public Builder clearSelectedFields() { + selectedFields_ = com.google.protobuf.LazyStringArrayList.EMPTY; + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + /** + * + * + *
+       * Names of the fields in the table that should be read. If empty, all
+       * fields will be read. If the specified field is a nested field, all
+       * the sub-fields in the field will be selected. The output field order is
+       * unrelated to the order of fields in selected_fields.
+       * 
+ * + * repeated string selected_fields = 1; + * + * @param value The bytes of the selectedFields to add. + * @return This builder for chaining. + */ + public Builder addSelectedFieldsBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + ensureSelectedFieldsIsMutable(); + selectedFields_.add(value); + onChanged(); + return this; + } + + private java.lang.Object rowRestriction_ = ""; + /** + * + * + *
+       * SQL text filtering statement, similar to a WHERE clause in a query.
+       * Currently, only a single predicate that is a comparison between a column
+       * and a constant value is supported. Aggregates are not supported.
+       * Examples: "int_field > 5"
+       *           "date_field = CAST('2014-9-27' as DATE)"
+       *           "nullable_field is not NULL"
+       *           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+       *           "numeric_field BETWEEN 1.0 AND 5.0"
+       * 
+ * + * string row_restriction = 2; + * + * @return The rowRestriction. + */ + public java.lang.String getRowRestriction() { + java.lang.Object ref = rowRestriction_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + rowRestriction_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+       * SQL text filtering statement, similar to a WHERE clause in a query.
+       * Currently, only a single predicate that is a comparison between a column
+       * and a constant value is supported. Aggregates are not supported.
+       * Examples: "int_field > 5"
+       *           "date_field = CAST('2014-9-27' as DATE)"
+       *           "nullable_field is not NULL"
+       *           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+       *           "numeric_field BETWEEN 1.0 AND 5.0"
+       * 
+ * + * string row_restriction = 2; + * + * @return The bytes for rowRestriction. + */ + public com.google.protobuf.ByteString getRowRestrictionBytes() { + java.lang.Object ref = rowRestriction_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + rowRestriction_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+       * SQL text filtering statement, similar to a WHERE clause in a query.
+       * Currently, only a single predicate that is a comparison between a column
+       * and a constant value is supported. Aggregates are not supported.
+       * Examples: "int_field > 5"
+       *           "date_field = CAST('2014-9-27' as DATE)"
+       *           "nullable_field is not NULL"
+       *           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+       *           "numeric_field BETWEEN 1.0 AND 5.0"
+       * 
+ * + * string row_restriction = 2; + * + * @param value The rowRestriction to set. + * @return This builder for chaining. + */ + public Builder setRowRestriction(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + rowRestriction_ = value; + onChanged(); + return this; + } + /** + * + * + *
+       * SQL text filtering statement, similar to a WHERE clause in a query.
+       * Currently, only a single predicate that is a comparison between a column
+       * and a constant value is supported. Aggregates are not supported.
+       * Examples: "int_field > 5"
+       *           "date_field = CAST('2014-9-27' as DATE)"
+       *           "nullable_field is not NULL"
+       *           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+       *           "numeric_field BETWEEN 1.0 AND 5.0"
+       * 
+ * + * string row_restriction = 2; + * + * @return This builder for chaining. + */ + public Builder clearRowRestriction() { + + rowRestriction_ = getDefaultInstance().getRowRestriction(); + onChanged(); + return this; + } + /** + * + * + *
+       * SQL text filtering statement, similar to a WHERE clause in a query.
+       * Currently, only a single predicate that is a comparison between a column
+       * and a constant value is supported. Aggregates are not supported.
+       * Examples: "int_field > 5"
+       *           "date_field = CAST('2014-9-27' as DATE)"
+       *           "nullable_field is not NULL"
+       *           "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
+       *           "numeric_field BETWEEN 1.0 AND 5.0"
+       * 
+ * + * string row_restriction = 2; + * + * @param value The bytes for rowRestriction to set. + * @return This builder for chaining. + */ + public Builder setRowRestrictionBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + rowRestriction_ = value; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions) + private static final com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public TableReadOptions parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TableReadOptions(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + private int schemaCase_ = 0; + private java.lang.Object schema_; + + public enum SchemaCase implements com.google.protobuf.Internal.EnumLite { + AVRO_SCHEMA(4), + ARROW_SCHEMA(5), + SCHEMA_NOT_SET(0); + private final int value; + + private SchemaCase(int value) { + this.value = value; + } + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static SchemaCase valueOf(int value) { + return forNumber(value); + } + + public static SchemaCase forNumber(int value) { + switch (value) { + case 4: + return AVRO_SCHEMA; + case 5: + return ARROW_SCHEMA; + case 0: + return SCHEMA_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public SchemaCase getSchemaCase() { + return SchemaCase.forNumber(schemaCase_); + } + + public static final int NAME_FIELD_NUMBER = 1; + private volatile java.lang.Object name_; + /** + * + * + *
+   * Output only. Unique identifier for the session, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * + * + *
+   * Output only. Unique identifier for the session, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int EXPIRE_TIME_FIELD_NUMBER = 2; + private com.google.protobuf.Timestamp expireTime_; + /** + * + * + *
+   * Output only. Time at which the session becomes invalid. After this time,
+   * subsequent requests to read this Session will return errors. The
+   * expire_time is automatically assigned and currently cannot be specified or
+   * updated.
+   * 
+ * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the expireTime field is set. + */ + public boolean hasExpireTime() { + return expireTime_ != null; + } + /** + * + * + *
+   * Output only. Time at which the session becomes invalid. After this time,
+   * subsequent requests to read this Session will return errors. The
+   * expire_time is automatically assigned and currently cannot be specified or
+   * updated.
+   * 
+ * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The expireTime. + */ + public com.google.protobuf.Timestamp getExpireTime() { + return expireTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : expireTime_; + } + /** + * + * + *
+   * Output only. Time at which the session becomes invalid. After this time,
+   * subsequent requests to read this Session will return errors. The
+   * expire_time is automatically assigned and currently cannot be specified or
+   * updated.
+   * 
+ * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() { + return getExpireTime(); + } + + public static final int DATA_FORMAT_FIELD_NUMBER = 3; + private int dataFormat_; + /** + * + * + *
+   * Immutable. Data format of the output data. Defaults to Apache Avro.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The enum numeric value on the wire for dataFormat. + */ + public int getDataFormatValue() { + return dataFormat_; + } + /** + * + * + *
+   * Immutable. Data format of the output data. Defaults to Apache Avro.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The dataFormat. + */ + public com.google.cloud.bigquery.storage.v1beta2.DataFormat getDataFormat() { + @SuppressWarnings("deprecation") + com.google.cloud.bigquery.storage.v1beta2.DataFormat result = + com.google.cloud.bigquery.storage.v1beta2.DataFormat.valueOf(dataFormat_); + return result == null + ? com.google.cloud.bigquery.storage.v1beta2.DataFormat.UNRECOGNIZED + : result; + } + + public static final int AVRO_SCHEMA_FIELD_NUMBER = 4; + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the avroSchema field is set. + */ + public boolean hasAvroSchema() { + return schemaCase_ == 4; + } + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The avroSchema. + */ + public com.google.cloud.bigquery.storage.v1beta2.AvroSchema getAvroSchema() { + if (schemaCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance(); + } + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.AvroSchemaOrBuilder getAvroSchemaOrBuilder() { + if (schemaCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance(); + } + + public static final int ARROW_SCHEMA_FIELD_NUMBER = 5; + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the arrowSchema field is set. + */ + public boolean hasArrowSchema() { + return schemaCase_ == 5; + } + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The arrowSchema. + */ + public com.google.cloud.bigquery.storage.v1beta2.ArrowSchema getArrowSchema() { + if (schemaCase_ == 5) { + return (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance(); + } + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ArrowSchemaOrBuilder getArrowSchemaOrBuilder() { + if (schemaCase_ == 5) { + return (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance(); + } + + public static final int TABLE_FIELD_NUMBER = 6; + private volatile java.lang.Object table_; + /** + * + * + *
+   * Table that this ReadSession is reading from, in the form
+   * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+   * 
+ * + * string table = 6; + * + * @return The table. + */ + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + table_ = s; + return s; + } + } + /** + * + * + *
+   * Table that this ReadSession is reading from, in the form
+   * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+   * 
+ * + * string table = 6; + * + * @return The bytes for table. + */ + public com.google.protobuf.ByteString getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int TABLE_MODIFIERS_FIELD_NUMBER = 7; + private com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers tableModifiers_; + /** + * + * + *
+   * Optional. Any modifiers which are applied when reading from the specified
+   * table.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the tableModifiers field is set. + */ + public boolean hasTableModifiers() { + return tableModifiers_ != null; + } + /** + * + * + *
+   * Optional. Any modifiers which are applied when reading from the specified
+   * table.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The tableModifiers. + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers getTableModifiers() { + return tableModifiers_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers.getDefaultInstance() + : tableModifiers_; + } + /** + * + * + *
+   * Optional. Any modifiers which are applied when reading from the specified
+   * table.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiersOrBuilder + getTableModifiersOrBuilder() { + return getTableModifiers(); + } + + public static final int READ_OPTIONS_FIELD_NUMBER = 8; + private com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions readOptions_; + /** + * + * + *
+   * Optional. Read options for this session (e.g. column selection, filters).
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the readOptions field is set. + */ + public boolean hasReadOptions() { + return readOptions_ != null; + } + /** + * + * + *
+   * Optional. Read options for this session (e.g. column selection, filters).
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The readOptions. + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions getReadOptions() { + return readOptions_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions + .getDefaultInstance() + : readOptions_; + } + /** + * + * + *
+   * Optional. Read options for this session (e.g. column selection, filters).
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptionsOrBuilder + getReadOptionsOrBuilder() { + return getReadOptions(); + } + + public static final int STREAMS_FIELD_NUMBER = 10; + private java.util.List streams_; + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated;
+   * in that case, the user will need to use a List method (not yet
+   * available) to get the streams instead.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List getStreamsList() { + return streams_; + } + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated;
+   * in that case, the user will need to use a List method (not yet
+   * available) to get the streams instead.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getStreamsOrBuilderList() { + return streams_; + } + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated;
+   * in that case, the user will need to use a List method (not yet
+   * available) to get the streams instead.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public int getStreamsCount() { + return streams_.size(); + } + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated;
+   * in that case, the user will need to use a List method (not yet
+   * available) to get the streams instead.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadStream getStreams(int index) { + return streams_.get(index); + } + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated;
+   * in that case, the user will need to use a List method (not yet
+   * available) to get the streams instead.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder getStreamsOrBuilder( + int index) { + return streams_.get(index); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!getNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (expireTime_ != null) { + output.writeMessage(2, getExpireTime()); + } + if (dataFormat_ + != com.google.cloud.bigquery.storage.v1beta2.DataFormat.DATA_FORMAT_UNSPECIFIED + .getNumber()) { + output.writeEnum(3, dataFormat_); + } + if (schemaCase_ == 4) { + output.writeMessage(4, (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_); + } + if (schemaCase_ == 5) { + output.writeMessage(5, (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_); + } + if (!getTableBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 6, table_); + } + if (tableModifiers_ != null) { + output.writeMessage(7, getTableModifiers()); + } + if (readOptions_ != null) { + output.writeMessage(8, getReadOptions()); + } + for (int i = 0; i < streams_.size(); i++) { + output.writeMessage(10, streams_.get(i)); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (expireTime_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getExpireTime()); + } + if (dataFormat_ + != com.google.cloud.bigquery.storage.v1beta2.DataFormat.DATA_FORMAT_UNSPECIFIED + .getNumber()) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(3, dataFormat_); + } + if (schemaCase_ == 4) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_); + } + if (schemaCase_ == 5) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 5, (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_); + } + if (!getTableBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, table_); + } + if (tableModifiers_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(7, getTableModifiers()); + } + if (readOptions_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(8, getReadOptions()); + } + for (int i = 0; i < streams_.size(); i++) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(10, streams_.get(i)); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.ReadSession)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.ReadSession other = + (com.google.cloud.bigquery.storage.v1beta2.ReadSession) obj; + + if 
(!getName().equals(other.getName())) return false; + if (hasExpireTime() != other.hasExpireTime()) return false; + if (hasExpireTime()) { + if (!getExpireTime().equals(other.getExpireTime())) return false; + } + if (dataFormat_ != other.dataFormat_) return false; + if (!getTable().equals(other.getTable())) return false; + if (hasTableModifiers() != other.hasTableModifiers()) return false; + if (hasTableModifiers()) { + if (!getTableModifiers().equals(other.getTableModifiers())) return false; + } + if (hasReadOptions() != other.hasReadOptions()) return false; + if (hasReadOptions()) { + if (!getReadOptions().equals(other.getReadOptions())) return false; + } + if (!getStreamsList().equals(other.getStreamsList())) return false; + if (!getSchemaCase().equals(other.getSchemaCase())) return false; + switch (schemaCase_) { + case 4: + if (!getAvroSchema().equals(other.getAvroSchema())) return false; + break; + case 5: + if (!getArrowSchema().equals(other.getArrowSchema())) return false; + break; + case 0: + default: + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + if (hasExpireTime()) { + hash = (37 * hash) + EXPIRE_TIME_FIELD_NUMBER; + hash = (53 * hash) + getExpireTime().hashCode(); + } + hash = (37 * hash) + DATA_FORMAT_FIELD_NUMBER; + hash = (53 * hash) + dataFormat_; + hash = (37 * hash) + TABLE_FIELD_NUMBER; + hash = (53 * hash) + getTable().hashCode(); + if (hasTableModifiers()) { + hash = (37 * hash) + TABLE_MODIFIERS_FIELD_NUMBER; + hash = (53 * hash) + getTableModifiers().hashCode(); + } + if (hasReadOptions()) { + hash = (37 * hash) + READ_OPTIONS_FIELD_NUMBER; + hash = (53 * hash) + getReadOptions().hashCode(); + } + if (getStreamsCount() > 0) { + hash = (37 * hash) + STREAMS_FIELD_NUMBER; + hash = (53 * hash) + getStreamsList().hashCode(); + } + switch (schemaCase_) { + case 4: + hash = (37 * hash) + AVRO_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getAvroSchema().hashCode(); + break; + case 5: + hash = (37 * hash) + ARROW_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getArrowSchema().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static 
com.google.cloud.bigquery.storage.v1beta2.ReadSession parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.ReadSession prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Information about the ReadSession.
+   * 
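+   *
+   * A minimal construction sketch (illustrative only; the project, dataset,
+   * table and row-restriction values are hypothetical, and DataFormat.AVRO is
+   * assumed to be one of the generated enum constants):
+   *
+   *   ReadSession session =
+   *       ReadSession.newBuilder()
+   *           .setTable("projects/my-project/datasets/my_dataset/tables/my_table")
+   *           .setDataFormat(DataFormat.AVRO)
+   *           .setReadOptions(
+   *               ReadSession.TableReadOptions.newBuilder()
+   *                   .setRowRestriction("state = \"CA\"")
+   *                   .build())
+   *           .build();
+   *
+   * In practice, callers typically populate only table, data_format and
+   * read_options; the output-only fields are filled in by the service when
+   * the session is created via CreateReadSession.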
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ReadSession} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.ReadSession) + com.google.cloud.bigquery.storage.v1beta2.ReadSessionOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.class, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.ReadSession.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getStreamsFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + name_ = ""; + + if (expireTimeBuilder_ == null) { + expireTime_ = null; + } else { + expireTime_ = null; + expireTimeBuilder_ = null; + } + dataFormat_ = 0; + + table_ = ""; + + if (tableModifiersBuilder_ == null) { + tableModifiers_ = null; + } else { + tableModifiers_ = null; + tableModifiersBuilder_ = null; + } + if (readOptionsBuilder_ == null) { + readOptions_ = null; + } else { + readOptions_ = null; + readOptionsBuilder_ = null; + } + if (streamsBuilder_ == null) { + streams_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + streamsBuilder_.clear(); + } + schemaCase_ = 0; + schema_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSession getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.ReadSession.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSession build() { + com.google.cloud.bigquery.storage.v1beta2.ReadSession result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSession buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.ReadSession result = + new com.google.cloud.bigquery.storage.v1beta2.ReadSession(this); + int from_bitField0_ = bitField0_; + result.name_ = name_; + if (expireTimeBuilder_ == null) { + result.expireTime_ = expireTime_; + } else { + result.expireTime_ = expireTimeBuilder_.build(); + } + result.dataFormat_ = dataFormat_; + if (schemaCase_ == 4) { + if (avroSchemaBuilder_ == null) { + result.schema_ = schema_; + } else { + 
result.schema_ = avroSchemaBuilder_.build(); + } + } + if (schemaCase_ == 5) { + if (arrowSchemaBuilder_ == null) { + result.schema_ = schema_; + } else { + result.schema_ = arrowSchemaBuilder_.build(); + } + } + result.table_ = table_; + if (tableModifiersBuilder_ == null) { + result.tableModifiers_ = tableModifiers_; + } else { + result.tableModifiers_ = tableModifiersBuilder_.build(); + } + if (readOptionsBuilder_ == null) { + result.readOptions_ = readOptions_; + } else { + result.readOptions_ = readOptionsBuilder_.build(); + } + if (streamsBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0)) { + streams_ = java.util.Collections.unmodifiableList(streams_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.streams_ = streams_; + } else { + result.streams_ = streamsBuilder_.build(); + } + result.schemaCase_ = schemaCase_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.ReadSession) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.ReadSession) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.ReadSession other) { + if (other == com.google.cloud.bigquery.storage.v1beta2.ReadSession.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + onChanged(); + } + if (other.hasExpireTime()) { + mergeExpireTime(other.getExpireTime()); + } + if (other.dataFormat_ != 0) { + setDataFormatValue(other.getDataFormatValue()); + } + if (!other.getTable().isEmpty()) { + table_ = other.table_; + onChanged(); + } + if (other.hasTableModifiers()) { + mergeTableModifiers(other.getTableModifiers()); + } + if (other.hasReadOptions()) { + mergeReadOptions(other.getReadOptions()); + } + if (streamsBuilder_ == null) { + if (!other.streams_.isEmpty()) { + if (streams_.isEmpty()) { + streams_ = other.streams_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureStreamsIsMutable(); + streams_.addAll(other.streams_); + } + onChanged(); + } + } else { + if (!other.streams_.isEmpty()) { + if (streamsBuilder_.isEmpty()) { + streamsBuilder_.dispose(); + streamsBuilder_ = null; + streams_ = other.streams_; + bitField0_ = (bitField0_ & ~0x00000001); + streamsBuilder_ = + com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders + ? 
getStreamsFieldBuilder() + : null; + } else { + streamsBuilder_.addAllMessages(other.streams_); + } + } + } + switch (other.getSchemaCase()) { + case AVRO_SCHEMA: + { + mergeAvroSchema(other.getAvroSchema()); + break; + } + case ARROW_SCHEMA: + { + mergeArrowSchema(other.getArrowSchema()); + break; + } + case SCHEMA_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1beta2.ReadSession parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1beta2.ReadSession) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int schemaCase_ = 0; + private java.lang.Object schema_; + + public SchemaCase getSchemaCase() { + return SchemaCase.forNumber(schemaCase_); + } + + public Builder clearSchema() { + schemaCase_ = 0; + schema_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private java.lang.Object name_ = ""; + /** + * + * + *
+     * Output only. Unique identifier for the session, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Output only. Unique identifier for the session, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Output only. Unique identifier for the session, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + name_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Output only. Unique identifier for the session, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + * + * + *
+     * Output only. Unique identifier for the session, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + name_ = value; + onChanged(); + return this; + } + + private com.google.protobuf.Timestamp expireTime_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + expireTimeBuilder_; + /** + * + * + *
+     * Output only. Time at which the session becomes invalid. After this time,
+     * subsequent requests to read this Session will return errors. The
+     * expire_time is automatically assigned and currently cannot be specified or
+     * updated.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the expireTime field is set. + */ + public boolean hasExpireTime() { + return expireTimeBuilder_ != null || expireTime_ != null; + } + /** + * + * + *
+     * Output only. Time at which the session becomes invalid. After this time,
+     * subsequent requests to read this Session will return errors. The
+     * expire_time is automatically assigned and currently cannot be specified or
+     * updated.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The expireTime. + */ + public com.google.protobuf.Timestamp getExpireTime() { + if (expireTimeBuilder_ == null) { + return expireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : expireTime_; + } else { + return expireTimeBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Output only. Time at which the session becomes invalid. After this time,
+     * subsequent requests to read this Session will return errors. The
+     * expire_time is automatically assigned and currently cannot be specified or
+     * updated.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setExpireTime(com.google.protobuf.Timestamp value) { + if (expireTimeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + expireTime_ = value; + onChanged(); + } else { + expireTimeBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Output only. Time at which the session becomes invalid. After this time,
+     * subsequent requests to read this Session will return errors. The
+     * expire_time is automatically assigned and currently cannot be specified or
+     * updated.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setExpireTime(com.google.protobuf.Timestamp.Builder builderForValue) { + if (expireTimeBuilder_ == null) { + expireTime_ = builderForValue.build(); + onChanged(); + } else { + expireTimeBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Output only. Time at which the session becomes invalid. After this time,
+     * subsequent requests to read this Session will return errors. The
+     * expire_time is automatically assigned and currently cannot be specified or
+     * updated.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeExpireTime(com.google.protobuf.Timestamp value) { + if (expireTimeBuilder_ == null) { + if (expireTime_ != null) { + expireTime_ = + com.google.protobuf.Timestamp.newBuilder(expireTime_).mergeFrom(value).buildPartial(); + } else { + expireTime_ = value; + } + onChanged(); + } else { + expireTimeBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Output only. Time at which the session becomes invalid. After this time,
+     * subsequent requests to read this Session will return errors. The
+     * expire_time is automatically assigned and currently cannot be specified or
+     * updated.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearExpireTime() { + if (expireTimeBuilder_ == null) { + expireTime_ = null; + onChanged(); + } else { + expireTime_ = null; + expireTimeBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Output only. Time at which the session becomes invalid. After this time,
+     * subsequent requests to read this Session will return errors. The
+     * expire_time is automatically assigned and currently cannot be specified or
+     * updated.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.Timestamp.Builder getExpireTimeBuilder() { + + onChanged(); + return getExpireTimeFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Output only. Time at which the session becomes invalid. After this time,
+     * subsequent requests to read this Session will return errors. The
+     * expire_time is automatically assigned and currently cannot be specified or
+     * updated.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder() { + if (expireTimeBuilder_ != null) { + return expireTimeBuilder_.getMessageOrBuilder(); + } else { + return expireTime_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : expireTime_; + } + } + /** + * + * + *
+     * Output only. Time at which the session becomes invalid. After this time,
+     * subsequent requests to read this Session will return errors. The
+     * expire_time is automatically assigned and currently cannot be specified or
+     * updated.
+     * 
+ * + * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getExpireTimeFieldBuilder() { + if (expireTimeBuilder_ == null) { + expireTimeBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getExpireTime(), getParentForChildren(), isClean()); + expireTime_ = null; + } + return expireTimeBuilder_; + } + + private int dataFormat_ = 0; + /** + * + * + *
+     * Immutable. Data format of the output data. Defaults to Apache Avro.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The enum numeric value on the wire for dataFormat. + */ + public int getDataFormatValue() { + return dataFormat_; + } + /** + * + * + *
+     * Immutable. Data format of the output data. Defaults to Apache Avro.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @param value The enum numeric value on the wire for dataFormat to set. + * @return This builder for chaining. + */ + public Builder setDataFormatValue(int value) { + dataFormat_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Immutable. Data format of the output data. Defaults to Apache Avro.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The dataFormat. + */ + public com.google.cloud.bigquery.storage.v1beta2.DataFormat getDataFormat() { + @SuppressWarnings("deprecation") + com.google.cloud.bigquery.storage.v1beta2.DataFormat result = + com.google.cloud.bigquery.storage.v1beta2.DataFormat.valueOf(dataFormat_); + return result == null + ? com.google.cloud.bigquery.storage.v1beta2.DataFormat.UNRECOGNIZED + : result; + } + /** + * + * + *
+     * Immutable. Data format of the output data. Defaults to Apache Avro.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @param value The dataFormat to set. + * @return This builder for chaining. + */ + public Builder setDataFormat(com.google.cloud.bigquery.storage.v1beta2.DataFormat value) { + if (value == null) { + throw new NullPointerException(); + } + + dataFormat_ = value.getNumber(); + onChanged(); + return this; + } + /** + * + * + *
+     * Immutable. Data format of the output data. Defaults to Apache Avro.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return This builder for chaining. + */ + public Builder clearDataFormat() { + + dataFormat_ = 0; + onChanged(); + return this; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.AvroSchema, + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.AvroSchemaOrBuilder> + avroSchemaBuilder_; + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the avroSchema field is set. + */ + public boolean hasAvroSchema() { + return schemaCase_ == 4; + } + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The avroSchema. + */ + public com.google.cloud.bigquery.storage.v1beta2.AvroSchema getAvroSchema() { + if (avroSchemaBuilder_ == null) { + if (schemaCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance(); + } else { + if (schemaCase_ == 4) { + return avroSchemaBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance(); + } + } + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setAvroSchema(com.google.cloud.bigquery.storage.v1beta2.AvroSchema value) { + if (avroSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + schema_ = value; + onChanged(); + } else { + avroSchemaBuilder_.setMessage(value); + } + schemaCase_ = 4; + return this; + } + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setAvroSchema( + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.Builder builderForValue) { + if (avroSchemaBuilder_ == null) { + schema_ = builderForValue.build(); + onChanged(); + } else { + avroSchemaBuilder_.setMessage(builderForValue.build()); + } + schemaCase_ = 4; + return this; + } + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeAvroSchema(com.google.cloud.bigquery.storage.v1beta2.AvroSchema value) { + if (avroSchemaBuilder_ == null) { + if (schemaCase_ == 4 + && schema_ + != com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance()) { + schema_ = + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.newBuilder( + (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_) + .mergeFrom(value) + .buildPartial(); + } else { + schema_ = value; + } + onChanged(); + } else { + if (schemaCase_ == 4) { + avroSchemaBuilder_.mergeFrom(value); + } + avroSchemaBuilder_.setMessage(value); + } + schemaCase_ = 4; + return this; + } + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearAvroSchema() { + if (avroSchemaBuilder_ == null) { + if (schemaCase_ == 4) { + schemaCase_ = 0; + schema_ = null; + onChanged(); + } + } else { + if (schemaCase_ == 4) { + schemaCase_ = 0; + schema_ = null; + } + avroSchemaBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.AvroSchema.Builder getAvroSchemaBuilder() { + return getAvroSchemaFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.AvroSchemaOrBuilder getAvroSchemaOrBuilder() { + if ((schemaCase_ == 4) && (avroSchemaBuilder_ != null)) { + return avroSchemaBuilder_.getMessageOrBuilder(); + } else { + if (schemaCase_ == 4) { + return (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance(); + } + } + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.AvroSchema, + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.AvroSchemaOrBuilder> + getAvroSchemaFieldBuilder() { + if (avroSchemaBuilder_ == null) { + if (!(schemaCase_ == 4)) { + schema_ = com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance(); + } + avroSchemaBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.AvroSchema, + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.AvroSchemaOrBuilder>( + (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_, + getParentForChildren(), + isClean()); + schema_ = null; + } + schemaCase_ = 4; + onChanged(); + ; + return avroSchemaBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema, + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.ArrowSchemaOrBuilder> + arrowSchemaBuilder_; + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the arrowSchema field is set. + */ + public boolean hasArrowSchema() { + return schemaCase_ == 5; + } + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The arrowSchema. + */ + public com.google.cloud.bigquery.storage.v1beta2.ArrowSchema getArrowSchema() { + if (arrowSchemaBuilder_ == null) { + if (schemaCase_ == 5) { + return (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance(); + } else { + if (schemaCase_ == 5) { + return arrowSchemaBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance(); + } + } + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setArrowSchema(com.google.cloud.bigquery.storage.v1beta2.ArrowSchema value) { + if (arrowSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + schema_ = value; + onChanged(); + } else { + arrowSchemaBuilder_.setMessage(value); + } + schemaCase_ = 5; + return this; + } + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setArrowSchema( + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.Builder builderForValue) { + if (arrowSchemaBuilder_ == null) { + schema_ = builderForValue.build(); + onChanged(); + } else { + arrowSchemaBuilder_.setMessage(builderForValue.build()); + } + schemaCase_ = 5; + return this; + } + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeArrowSchema(com.google.cloud.bigquery.storage.v1beta2.ArrowSchema value) { + if (arrowSchemaBuilder_ == null) { + if (schemaCase_ == 5 + && schema_ + != com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance()) { + schema_ = + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.newBuilder( + (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_) + .mergeFrom(value) + .buildPartial(); + } else { + schema_ = value; + } + onChanged(); + } else { + if (schemaCase_ == 5) { + arrowSchemaBuilder_.mergeFrom(value); + } + arrowSchemaBuilder_.setMessage(value); + } + schemaCase_ = 5; + return this; + } + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearArrowSchema() { + if (arrowSchemaBuilder_ == null) { + if (schemaCase_ == 5) { + schemaCase_ = 0; + schema_ = null; + onChanged(); + } + } else { + if (schemaCase_ == 5) { + schemaCase_ = 0; + schema_ = null; + } + arrowSchemaBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.Builder getArrowSchemaBuilder() { + return getArrowSchemaFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ArrowSchemaOrBuilder + getArrowSchemaOrBuilder() { + if ((schemaCase_ == 5) && (arrowSchemaBuilder_ != null)) { + return arrowSchemaBuilder_.getMessageOrBuilder(); + } else { + if (schemaCase_ == 5) { + return (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance(); + } + } + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema, + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.ArrowSchemaOrBuilder> + getArrowSchemaFieldBuilder() { + if (arrowSchemaBuilder_ == null) { + if (!(schemaCase_ == 5)) { + schema_ = com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance(); + } + arrowSchemaBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema, + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.ArrowSchemaOrBuilder>( + (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_, + getParentForChildren(), + isClean()); + schema_ = null; + } + schemaCase_ = 5; + onChanged(); + ; + return arrowSchemaBuilder_; + } + + private java.lang.Object table_ = ""; + /** + * + * + *
+     * Table that this ReadSession is reading from, in the form
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+     * 
+ * + * string table = 6; + * + * @return The table. + */ + public java.lang.String getTable() { + java.lang.Object ref = table_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + table_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Table that this ReadSession is reading from, in the form
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+     * 
+ * + * string table = 6; + * + * @return The bytes for table. + */ + public com.google.protobuf.ByteString getTableBytes() { + java.lang.Object ref = table_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + table_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Table that this ReadSession is reading from, in the form
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+     * 
+ * + * string table = 6; + * + * @param value The table to set. + * @return This builder for chaining. + */ + public Builder setTable(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + table_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Table that this ReadSession is reading from, in the form
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+     * 
+ * + * string table = 6; + * + * @return This builder for chaining. + */ + public Builder clearTable() { + + table_ = getDefaultInstance().getTable(); + onChanged(); + return this; + } + /** + * + * + *
+     * Table that this ReadSession is reading from, in the form
+     * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`.
+     * 
+ * + * string table = 6; + * + * @param value The bytes for table to set. + * @return This builder for chaining. + */ + public Builder setTableBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + table_ = value; + onChanged(); + return this; + } + + private com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers tableModifiers_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiersOrBuilder> + tableModifiersBuilder_; + /** + * + * + *
+     * Optional. Any modifiers which are applied when reading from the specified
+     * table.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the tableModifiers field is set. + */ + public boolean hasTableModifiers() { + return tableModifiersBuilder_ != null || tableModifiers_ != null; + } + /** + * + * + *
+     * Optional. Any modifiers which are applied when reading from the specified
+     * table.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The tableModifiers. + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers + getTableModifiers() { + if (tableModifiersBuilder_ == null) { + return tableModifiers_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers + .getDefaultInstance() + : tableModifiers_; + } else { + return tableModifiersBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Optional. Any modifiers which are applied when reading from the specified
+     * table.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setTableModifiers( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers value) { + if (tableModifiersBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tableModifiers_ = value; + onChanged(); + } else { + tableModifiersBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Optional. Any modifiers which are applied when reading from the specified
+     * table.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setTableModifiers( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers.Builder + builderForValue) { + if (tableModifiersBuilder_ == null) { + tableModifiers_ = builderForValue.build(); + onChanged(); + } else { + tableModifiersBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Optional. Any modifiers which are applied when reading from the specified
+     * table.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeTableModifiers( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers value) { + if (tableModifiersBuilder_ == null) { + if (tableModifiers_ != null) { + tableModifiers_ = + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers.newBuilder( + tableModifiers_) + .mergeFrom(value) + .buildPartial(); + } else { + tableModifiers_ = value; + } + onChanged(); + } else { + tableModifiersBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Optional. Any modifiers which are applied when reading from the specified
+     * table.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearTableModifiers() { + if (tableModifiersBuilder_ == null) { + tableModifiers_ = null; + onChanged(); + } else { + tableModifiers_ = null; + tableModifiersBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Optional. Any modifiers which are applied when reading from the specified
+     * table.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers.Builder + getTableModifiersBuilder() { + + onChanged(); + return getTableModifiersFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Optional. Any modifiers which are applied when reading from the specified
+     * table.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiersOrBuilder + getTableModifiersOrBuilder() { + if (tableModifiersBuilder_ != null) { + return tableModifiersBuilder_.getMessageOrBuilder(); + } else { + return tableModifiers_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers + .getDefaultInstance() + : tableModifiers_; + } + } + /** + * + * + *
+     * Optional. Any modifiers which are applied when reading from the specified
+     * table.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiersOrBuilder> + getTableModifiersFieldBuilder() { + if (tableModifiersBuilder_ == null) { + tableModifiersBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiersOrBuilder>( + getTableModifiers(), getParentForChildren(), isClean()); + tableModifiers_ = null; + } + return tableModifiersBuilder_; + } + + private com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions readOptions_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptionsOrBuilder> + readOptionsBuilder_; + /** + * + * + *
+     * Optional. Read options for this session (e.g. column selection, filters).
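+     * As an illustrative sketch only, a caller might limit the read to a handful of
+     * columns and filter rows with a SQL-like predicate, assuming the corresponding
+     * options exist on `TableReadOptions`.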
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the readOptions field is set. + */ + public boolean hasReadOptions() { + return readOptionsBuilder_ != null || readOptions_ != null; + } + /** + * + * + *
+     * Optional. Read options for this session (e.g. column selection, filters).
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The readOptions. + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions getReadOptions() { + if (readOptionsBuilder_ == null) { + return readOptions_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions + .getDefaultInstance() + : readOptions_; + } else { + return readOptionsBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Optional. Read options for this session (e.g. column selection, filters).
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setReadOptions( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions value) { + if (readOptionsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + readOptions_ = value; + onChanged(); + } else { + readOptionsBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Optional. Read options for this session (e.g. column selection, filters).
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder setReadOptions( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions.Builder + builderForValue) { + if (readOptionsBuilder_ == null) { + readOptions_ = builderForValue.build(); + onChanged(); + } else { + readOptionsBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Optional. Read options for this session (e.g. column selection, filters).
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder mergeReadOptions( + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions value) { + if (readOptionsBuilder_ == null) { + if (readOptions_ != null) { + readOptions_ = + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions.newBuilder( + readOptions_) + .mergeFrom(value) + .buildPartial(); + } else { + readOptions_ = value; + } + onChanged(); + } else { + readOptionsBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Optional. Read options for this session (e.g. column selection, filters).
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public Builder clearReadOptions() { + if (readOptionsBuilder_ == null) { + readOptions_ = null; + onChanged(); + } else { + readOptions_ = null; + readOptionsBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Optional. Read options for this session (e.g. column selection, filters).
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions.Builder + getReadOptionsBuilder() { + + onChanged(); + return getReadOptionsFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Optional. Read options for this session (e.g. column selection, filters).
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptionsOrBuilder + getReadOptionsOrBuilder() { + if (readOptionsBuilder_ != null) { + return readOptionsBuilder_.getMessageOrBuilder(); + } else { + return readOptions_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions + .getDefaultInstance() + : readOptions_; + } + } + /** + * + * + *
+     * Optional. Read options for this session (e.g. column selection, filters).
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptionsOrBuilder> + getReadOptionsFieldBuilder() { + if (readOptionsBuilder_ == null) { + readOptionsBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptionsOrBuilder>( + getReadOptions(), getParentForChildren(), isClean()); + readOptions_ = null; + } + return readOptionsBuilder_; + } + + private java.util.List streams_ = + java.util.Collections.emptyList(); + + private void ensureStreamsIsMutable() { + if (!((bitField0_ & 0x00000001) != 0)) { + streams_ = + new java.util.ArrayList(streams_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadStream, + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder> + streamsBuilder_; + + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
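+     * A minimal consumption sketch (the `session` variable below is illustrative and
+     * not part of this API):
+     *   for (ReadStream stream : session.getStreamsList()) {
+     *     String streamName = stream.getName(); // each stream is read via ReadRows
+     *   }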
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List getStreamsList() { + if (streamsBuilder_ == null) { + return java.util.Collections.unmodifiableList(streams_); + } else { + return streamsBuilder_.getMessageList(); + } + } + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public int getStreamsCount() { + if (streamsBuilder_ == null) { + return streams_.size(); + } else { + return streamsBuilder_.getCount(); + } + } + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadStream getStreams(int index) { + if (streamsBuilder_ == null) { + return streams_.get(index); + } else { + return streamsBuilder_.getMessage(index); + } + } + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setStreams( + int index, com.google.cloud.bigquery.storage.v1beta2.ReadStream value) { + if (streamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamsIsMutable(); + streams_.set(index, value); + onChanged(); + } else { + streamsBuilder_.setMessage(index, value); + } + return this; + } + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setStreams( + int index, com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder builderForValue) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.set(index, builderForValue.build()); + onChanged(); + } else { + streamsBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addStreams(com.google.cloud.bigquery.storage.v1beta2.ReadStream value) { + if (streamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamsIsMutable(); + streams_.add(value); + onChanged(); + } else { + streamsBuilder_.addMessage(value); + } + return this; + } + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addStreams( + int index, com.google.cloud.bigquery.storage.v1beta2.ReadStream value) { + if (streamsBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureStreamsIsMutable(); + streams_.add(index, value); + onChanged(); + } else { + streamsBuilder_.addMessage(index, value); + } + return this; + } + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addStreams( + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder builderForValue) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.add(builderForValue.build()); + onChanged(); + } else { + streamsBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addStreams( + int index, com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder builderForValue) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.add(index, builderForValue.build()); + onChanged(); + } else { + streamsBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder addAllStreams( + java.lang.Iterable values) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + com.google.protobuf.AbstractMessageLite.Builder.addAll(values, streams_); + onChanged(); + } else { + streamsBuilder_.addAllMessages(values); + } + return this; + } + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearStreams() { + if (streamsBuilder_ == null) { + streams_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + streamsBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder removeStreams(int index) { + if (streamsBuilder_ == null) { + ensureStreamsIsMutable(); + streams_.remove(index); + onChanged(); + } else { + streamsBuilder_.remove(index); + } + return this; + } + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder getStreamsBuilder( + int index) { + return getStreamsFieldBuilder().getBuilder(index); + } + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder getStreamsOrBuilder( + int index) { + if (streamsBuilder_ == null) { + return streams_.get(index); + } else { + return streamsBuilder_.getMessageOrBuilder(index); + } + } + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getStreamsOrBuilderList() { + if (streamsBuilder_ != null) { + return streamsBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(streams_); + } + } + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder addStreamsBuilder() { + return getStreamsFieldBuilder() + .addBuilder(com.google.cloud.bigquery.storage.v1beta2.ReadStream.getDefaultInstance()); + } + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder addStreamsBuilder( + int index) { + return getStreamsFieldBuilder() + .addBuilder( + index, com.google.cloud.bigquery.storage.v1beta2.ReadStream.getDefaultInstance()); + } + /** + * + * + *
+     * Output only. A list of streams created with the session.
+     * At least one stream is created with the session. In the future, larger
+     * request_stream_count values *may* result in this list being unpopulated;
+     * in that case, the user will need to use a List method to get the streams
+     * instead, which is not yet available.
+     * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public java.util.List + getStreamsBuilderList() { + return getStreamsFieldBuilder().getBuilderList(); + } + + private com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadStream, + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder> + getStreamsFieldBuilder() { + if (streamsBuilder_ == null) { + streamsBuilder_ = + new com.google.protobuf.RepeatedFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadStream, + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder>( + streams_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); + streams_ = null; + } + return streamsBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.ReadSession) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.ReadSession) + private static final com.google.cloud.bigquery.storage.v1beta2.ReadSession DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.ReadSession(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadSession getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReadSession parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ReadSession(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadSession getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadSessionOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadSessionOrBuilder.java new file mode 100644 index 0000000000..bc65716270 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadSessionOrBuilder.java @@ -0,0 +1,410 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/stream.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +public interface ReadSessionOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.ReadSession) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Output only. Unique identifier for the session, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
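+   * A hypothetical example: `projects/my-project/locations/us/sessions/1234-abcd`.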
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + java.lang.String getName(); + /** + * + * + *
+   * Output only. Unique identifier for the session, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}`.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * Output only. Time at which the session becomes invalid. After this time,
+   * subsequent requests to read this Session will return errors. The
+   * expire_time is automatically assigned and currently cannot be specified or
+   * updated.
+   * 
+ * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the expireTime field is set. + */ + boolean hasExpireTime(); + /** + * + * + *
+   * Output only. Time at which the session becomes invalid. After this time,
+   * subsequent requests to read this Session will return errors. The
+   * expire_time is automatically assigned and currently cannot be specified or
+   * updated.
+   * 
+ * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The expireTime. + */ + com.google.protobuf.Timestamp getExpireTime(); + /** + * + * + *
+   * Output only. Time at which the session becomes invalid. After this time,
+   * subsequent requests to read this Session will return errors. The
+   * expire_time is automatically assigned and currently cannot be specified or
+   * updated.
+   * 
+ * + * .google.protobuf.Timestamp expire_time = 2 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.protobuf.TimestampOrBuilder getExpireTimeOrBuilder(); + + /** + * + * + *
+   * Immutable. Data format of the output data. Behavior defaults to Apache
+   * Avro.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The enum numeric value on the wire for dataFormat. + */ + int getDataFormatValue(); + /** + * + * + *
+   * Immutable. Data format of the output data. Behavior defaults to Apache
+   * Avro.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.DataFormat data_format = 3 [(.google.api.field_behavior) = IMMUTABLE]; + * + * + * @return The dataFormat. + */ + com.google.cloud.bigquery.storage.v1beta2.DataFormat getDataFormat(); + + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the avroSchema field is set. + */ + boolean hasAvroSchema(); + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The avroSchema. + */ + com.google.cloud.bigquery.storage.v1beta2.AvroSchema getAvroSchema(); + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.bigquery.storage.v1beta2.AvroSchemaOrBuilder getAvroSchemaOrBuilder(); + + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the arrowSchema field is set. + */ + boolean hasArrowSchema(); + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The arrowSchema. + */ + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema getArrowSchema(); + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.bigquery.storage.v1beta2.ArrowSchemaOrBuilder getArrowSchemaOrBuilder(); + + /** + * + * + *
+   * Table that this ReadSession is reading from, in the form
+   * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`
+   * 
+ * + * string table = 6; + * + * @return The table. + */ + java.lang.String getTable(); + /** + * + * + *
+   * Table that this ReadSession is reading from, in the form
+   * `projects/{project_id}/datasets/{dataset_id}/tables/{table_id}`
+   * 
+ * + * string table = 6; + * + * @return The bytes for table. + */ + com.google.protobuf.ByteString getTableBytes(); + + /** + * + * + *
+   * Optional. Any modifiers which are applied when reading from the specified
+   * table.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the tableModifiers field is set. + */ + boolean hasTableModifiers(); + /** + * + * + *
+   * Optional. Any modifiers which are applied when reading from the specified
+   * table.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The tableModifiers. + */ + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers getTableModifiers(); + /** + * + * + *
+   * Optional. Any modifiers which are applied when reading from the specified
+   * table.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiers table_modifiers = 7 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableModifiersOrBuilder + getTableModifiersOrBuilder(); + + /** + * + * + *
+   * Optional. Read options for this session (e.g. column selection, filters).
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return Whether the readOptions field is set. + */ + boolean hasReadOptions(); + /** + * + * + *
+   * Optional. Read options for this session (e.g. column selection, filters).
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + * + * @return The readOptions. + */ + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions getReadOptions(); + /** + * + * + *
+   * Optional. Read options for this session (e.g. column selection, filters).
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptions read_options = 8 [(.google.api.field_behavior) = OPTIONAL]; + * + */ + com.google.cloud.bigquery.storage.v1beta2.ReadSession.TableReadOptionsOrBuilder + getReadOptionsOrBuilder(); + + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated;
+   * in that case, the user will need to use a List method to get the streams
+   * instead, which is not yet available.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List getStreamsList(); + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated;
+   * in that case, the user will need to use a List method to get the streams
+   * instead, which is not yet available.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.bigquery.storage.v1beta2.ReadStream getStreams(int index); + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated;
+   * in that case, the user will need to use a List method to get the streams
+   * instead, which is not yet available.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + int getStreamsCount(); + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated;
+   * in that case, the user will need to use a List method to get the streams
+   * instead, which is not yet available.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + java.util.List + getStreamsOrBuilderList(); + /** + * + * + *
+   * Output only. A list of streams created with the session.
+   * At least one stream is created with the session. In the future, larger
+   * request_stream_count values *may* result in this list being unpopulated;
+   * in that case, the user will need to use a List method to get the streams
+   * instead, which is not yet available.
+   * 
+ * + * + * repeated .google.cloud.bigquery.storage.v1beta2.ReadStream streams = 10 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder getStreamsOrBuilder(int index); + + public com.google.cloud.bigquery.storage.v1beta2.ReadSession.SchemaCase getSchemaCase(); +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStream.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStream.java new file mode 100644 index 0000000000..f1f447876a --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStream.java @@ -0,0 +1,645 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/stream.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Information about a single stream that gets data out of the storage system.
+ * Most of the information about `ReadStream` instances is aggregated, making
+ * `ReadStream` lightweight.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ReadStream} + */ +public final class ReadStream extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.ReadStream) + ReadStreamOrBuilder { + private static final long serialVersionUID = 0L; + // Use ReadStream.newBuilder() to construct. + private ReadStream(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ReadStream() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ReadStream(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private ReadStream( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + name_ = s; + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadStream_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadStream_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ReadStream.class, + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + private volatile java.lang.Object name_; + /** + * + * + *
+   * Output only. Name of the stream, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
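+   * A hypothetical example:
+   * `projects/my-project/locations/us/sessions/1234-abcd/streams/5678-efgh`.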
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * + * + *
+   * Output only. Name of the stream, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!getNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.ReadStream)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.ReadStream other = + (com.google.cloud.bigquery.storage.v1beta2.ReadStream) obj; + + if (!getName().equals(other.getName())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadStream parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadStream parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadStream parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadStream parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadStream parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadStream parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadStream parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadStream parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadStream parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadStream parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadStream parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadStream parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.cloud.bigquery.storage.v1beta2.ReadStream prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Information about a single stream that gets data out of the storage system.
+   * Most of the information about `ReadStream` instances is aggregated, making
+   * `ReadStream` lightweight.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ReadStream} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.ReadStream) + com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadStream_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadStream_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ReadStream.class, + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.ReadStream.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + name_ = ""; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StreamProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ReadStream_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadStream getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.ReadStream.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadStream build() { + com.google.cloud.bigquery.storage.v1beta2.ReadStream result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadStream buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.ReadStream result = + new com.google.cloud.bigquery.storage.v1beta2.ReadStream(this); + result.name_ = name_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + 
@java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.ReadStream) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.ReadStream) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.ReadStream other) { + if (other == com.google.cloud.bigquery.storage.v1beta2.ReadStream.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + onChanged(); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1beta2.ReadStream parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1beta2.ReadStream) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object name_ = ""; + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + name_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return This builder for chaining. + */ + public Builder clearName() { + + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + * + * + *
+     * Output only. Name of the stream, in the form
+     * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+     * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + name_ = value; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.ReadStream) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.ReadStream) + private static final com.google.cloud.bigquery.storage.v1beta2.ReadStream DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.ReadStream(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ReadStream getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ReadStream parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ReadStream(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ReadStream getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStreamOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStreamOrBuilder.java new file mode 100644 index 0000000000..0f2f0660af --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStreamOrBuilder.java @@ -0,0 +1,52 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/stream.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +public interface ReadStreamOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.ReadStream) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Output only. Name of the stream, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The name. + */ + java.lang.String getName(); + /** + * + * + *
+   * Output only. Name of the stream, in the form
+   * `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`.
+   * 
+ * + * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamRequest.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamRequest.java new file mode 100644 index 0000000000..518c2c02ab --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamRequest.java @@ -0,0 +1,772 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Request message for `SplitReadStream`.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest} + */ +public final class SplitReadStreamRequest extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest) + SplitReadStreamRequestOrBuilder { + private static final long serialVersionUID = 0L; + // Use SplitReadStreamRequest.newBuilder() to construct. + private SplitReadStreamRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private SplitReadStreamRequest() { + name_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new SplitReadStreamRequest(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private SplitReadStreamRequest( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + + name_ = s; + break; + } + case 17: + { + fraction_ = input.readDouble(); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest.class, + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest.Builder.class); + } + + public static final int NAME_FIELD_NUMBER = 1; + private volatile java.lang.Object name_; + /** + * + * + *
+   * Required. Name of the stream to split.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } + } + /** + * + * + *
+   * Required. Name of the stream to split.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int FRACTION_FIELD_NUMBER = 2; + private double fraction_; + /** + * + * + *
+   * A value in the range (0.0, 1.0) that specifies the fractional point at
+   * which the original stream should be split. The actual split point is
+   * evaluated on pre-filtered rows, so if a filter is provided, then there is
+   * no guarantee that the division of the rows between the new child streams
+   * will be proportional to this fractional value. Additionally, because the
+   * server-side unit for assigning data is collections of rows, this fraction
+   * will always map to a data storage boundary on the server side.
+   * 
+ * + * double fraction = 2; + * + * @return The fraction. + */ + public double getFraction() { + return fraction_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!getNameBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); + } + if (fraction_ != 0D) { + output.writeDouble(2, fraction_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!getNameBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); + } + if (fraction_ != 0D) { + size += com.google.protobuf.CodedOutputStream.computeDoubleSize(2, fraction_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest other = + (com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest) obj; + + if (!getName().equals(other.getName())) return false; + if (java.lang.Double.doubleToLongBits(getFraction()) + != java.lang.Double.doubleToLongBits(other.getFraction())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + NAME_FIELD_NUMBER; + hash = (53 * hash) + getName().hashCode(); + hash = (37 * hash) + FRACTION_FIELD_NUMBER; + hash = + (53 * hash) + + com.google.protobuf.Internal.hashLong( + java.lang.Double.doubleToLongBits(getFraction())); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest parseFrom( + byte[] data) throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Request message for `SplitReadStream`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest) + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest.class, + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + name_ = ""; + + fraction_ = 0D; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamRequest_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest build() { + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest result = + new com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest(this); + result.name_ = name_; + result.fraction_ = fraction_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return 
super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest other) { + if (other + == com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest.getDefaultInstance()) + return this; + if (!other.getName().isEmpty()) { + name_ = other.name_; + onChanged(); + } + if (other.getFraction() != 0D) { + setFraction(other.getFraction()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private java.lang.Object name_ = ""; + /** + * + * + *
+     * Required. Name of the stream to split.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + public java.lang.String getName() { + java.lang.Object ref = name_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + name_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * + * + *
+     * Required. Name of the stream to split.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + public com.google.protobuf.ByteString getNameBytes() { + java.lang.Object ref = name_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + name_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * + * + *
+     * Required. Name of the stream to split.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The name to set. + * @return This builder for chaining. + */ + public Builder setName(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + name_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * Required. Name of the stream to split.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearName() { + + name_ = getDefaultInstance().getName(); + onChanged(); + return this; + } + /** + * + * + *
+     * Required. Name of the stream to split.
+     * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for name to set. + * @return This builder for chaining. + */ + public Builder setNameBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + + name_ = value; + onChanged(); + return this; + } + + private double fraction_; + /** + * + * + *
+     * A value in the range (0.0, 1.0) that specifies the fractional point at
+     * which the original stream should be split. The actual split point is
+     * evaluated on pre-filtered rows, so if a filter is provided, then there is
+     * no guarantee that the division of the rows between the new child streams
+     * will be proportional to this fractional value. Additionally, because the
+     * server-side unit for assigning data is collections of rows, this fraction
+     * will always map to a data storage boundary on the server side.
+     * 
+ * + * double fraction = 2; + * + * @return The fraction. + */ + public double getFraction() { + return fraction_; + } + /** + * + * + *
+     * A value in the range (0.0, 1.0) that specifies the fractional point at
+     * which the original stream should be split. The actual split point is
+     * evaluated on pre-filtered rows, so if a filter is provided, then there is
+     * no guarantee that the division of the rows between the new child streams
+     * will be proportional to this fractional value. Additionally, because the
+     * server-side unit for assigning data is collections of rows, this fraction
+     * will always map to a data storage boundary on the server side.
+     * 
+ * + * double fraction = 2; + * + * @param value The fraction to set. + * @return This builder for chaining. + */ + public Builder setFraction(double value) { + + fraction_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * A value in the range (0.0, 1.0) that specifies the fractional point at
+     * which the original stream should be split. The actual split point is
+     * evaluated on pre-filtered rows, so if a filter is provided, then there is
+     * no guarantee that the division of the rows between the new child streams
+     * will be proportional to this fractional value. Additionally, because the
+     * server-side unit for assigning data is collections of rows, this fraction
+     * will always map to a data storage boundary on the server side.
+     * 
+ * + * double fraction = 2; + * + * @return This builder for chaining. + */ + public Builder clearFraction() { + + fraction_ = 0D; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest) + private static final com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SplitReadStreamRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SplitReadStreamRequest(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamRequestOrBuilder.java new file mode 100644 index 0000000000..3a8f4fb7de --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamRequestOrBuilder.java @@ -0,0 +1,73 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +public interface SplitReadStreamRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.SplitReadStreamRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Required. Name of the stream to split.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The name. + */ + java.lang.String getName(); + /** + * + * + *
+   * Required. Name of the stream to split.
+   * 
+ * + * + * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for name. + */ + com.google.protobuf.ByteString getNameBytes(); + + /** + * + * + *
+   * A value in the range (0.0, 1.0) that specifies the fractional point at
+   * which the original stream should be split. The actual split point is
+   * evaluated on pre-filtered rows, so if a filter is provided, then there is
+   * no guarantee that the division of the rows between the new child streams
+   * will be proportional to this fractional value. Additionally, because the
+   * server-side unit for assigning data is collections of rows, this fraction
+   * will always map to a data storage boundary on the server side.
+   * 
+ * + * double fraction = 2; + * + * @return The fraction. + */ + double getFraction(); +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamResponse.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamResponse.java new file mode 100644 index 0000000000..e91eedb8ef --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamResponse.java @@ -0,0 +1,1059 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Response message for `SplitReadStream`.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse} + */ +public final class SplitReadStreamResponse extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse) + SplitReadStreamResponseOrBuilder { + private static final long serialVersionUID = 0L; + // Use SplitReadStreamResponse.newBuilder() to construct. + private SplitReadStreamResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private SplitReadStreamResponse() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new SplitReadStreamResponse(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private SplitReadStreamResponse( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder subBuilder = null; + if (primaryStream_ != null) { + subBuilder = primaryStream_.toBuilder(); + } + primaryStream_ = + input.readMessage( + com.google.cloud.bigquery.storage.v1beta2.ReadStream.parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(primaryStream_); + primaryStream_ = subBuilder.buildPartial(); + } + + break; + } + case 18: + { + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder subBuilder = null; + if (remainderStream_ != null) { + subBuilder = remainderStream_.toBuilder(); + } + remainderStream_ = + input.readMessage( + com.google.cloud.bigquery.storage.v1beta2.ReadStream.parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(remainderStream_); + remainderStream_ = subBuilder.buildPartial(); + } + + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse.class, + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse.Builder.class); + } + + 
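+  // A brief sketch of how this response is typically consumed, assuming the accessors
+  // defined below; `client` stands in for a v1beta2 BaseBigQueryReadClient and the
+  // stream name is a placeholder:
+  //
+  //   SplitReadStreamRequest request =
+  //       SplitReadStreamRequest.newBuilder()
+  //           .setName("projects/p/locations/us/sessions/s/streams/st")
+  //           .setFraction(0.5)
+  //           .build();
+  //   SplitReadStreamResponse response = client.splitReadStream(request);
+  //   if (response.hasPrimaryStream() && response.hasRemainderStream()) {
+  //     // The split produced two streams that can be read independently.
+  //   } else {
+  //     // An unset stream indicates the original stream could not be split further.
+  //   }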
public static final int PRIMARY_STREAM_FIELD_NUMBER = 1; + private com.google.cloud.bigquery.storage.v1beta2.ReadStream primaryStream_; + /** + * + * + *
+   * Primary stream, which contains the beginning portion of
+   * |original_stream|. An empty value indicates that the original stream can no
+   * longer be split.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1; + * + * @return Whether the primaryStream field is set. + */ + public boolean hasPrimaryStream() { + return primaryStream_ != null; + } + /** + * + * + *
+   * Primary stream, which contains the beginning portion of
+   * |original_stream|. An empty value indicates that the original stream can no
+   * longer be split.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1; + * + * @return The primaryStream. + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadStream getPrimaryStream() { + return primaryStream_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadStream.getDefaultInstance() + : primaryStream_; + } + /** + * + * + *
+   * Primary stream, which contains the beginning portion of
+   * |original_stream|. An empty value indicates that the original stream can no
+   * longer be split.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1; + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder getPrimaryStreamOrBuilder() { + return getPrimaryStream(); + } + + public static final int REMAINDER_STREAM_FIELD_NUMBER = 2; + private com.google.cloud.bigquery.storage.v1beta2.ReadStream remainderStream_; + /** + * + * + *
+   * Remainder stream, which contains the tail of |original_stream|. An empty
+   * value indicates that the original stream can no longer be split.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2; + * + * @return Whether the remainderStream field is set. + */ + public boolean hasRemainderStream() { + return remainderStream_ != null; + } + /** + * + * + *
+   * Remainder stream, which contains the tail of |original_stream|. An empty
+   * value indicates that the original stream can no longer be split.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2; + * + * @return The remainderStream. + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadStream getRemainderStream() { + return remainderStream_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadStream.getDefaultInstance() + : remainderStream_; + } + /** + * + * + *
+   * Remainder stream, which contains the tail of |original_stream|. An empty
+   * value indicates that the original stream can no longer be split.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2; + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder + getRemainderStreamOrBuilder() { + return getRemainderStream(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (primaryStream_ != null) { + output.writeMessage(1, getPrimaryStream()); + } + if (remainderStream_ != null) { + output.writeMessage(2, getRemainderStream()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (primaryStream_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getPrimaryStream()); + } + if (remainderStream_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getRemainderStream()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse other = + (com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse) obj; + + if (hasPrimaryStream() != other.hasPrimaryStream()) return false; + if (hasPrimaryStream()) { + if (!getPrimaryStream().equals(other.getPrimaryStream())) return false; + } + if (hasRemainderStream() != other.hasRemainderStream()) return false; + if (hasRemainderStream()) { + if (!getRemainderStream().equals(other.getRemainderStream())) return false; + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasPrimaryStream()) { + hash = (37 * hash) + PRIMARY_STREAM_FIELD_NUMBER; + hash = (53 * hash) + getPrimaryStream().hashCode(); + } + if (hasRemainderStream()) { + hash = (37 * hash) + REMAINDER_STREAM_FIELD_NUMBER; + hash = (53 * hash) + getRemainderStream().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse 
parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Response message for `SplitReadStream`.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse) + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse.class, + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse.Builder.class); + } + + // Construct using + // com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + if (primaryStreamBuilder_ == null) { + primaryStream_ = null; + } else { + primaryStream_ = null; + primaryStreamBuilder_ = null; + } + if (remainderStreamBuilder_ == null) { + remainderStream_ = null; + } else { + remainderStream_ = null; + remainderStreamBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamResponse_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse build() { + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse result = + new com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse(this); + if (primaryStreamBuilder_ == null) { + result.primaryStream_ = primaryStream_; + } else { + result.primaryStream_ = primaryStreamBuilder_.build(); + } + if (remainderStreamBuilder_ == null) { + result.remainderStream_ = remainderStream_; + } else { + result.remainderStream_ = remainderStreamBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, 
java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse other) { + if (other + == com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse.getDefaultInstance()) + return this; + if (other.hasPrimaryStream()) { + mergePrimaryStream(other.getPrimaryStream()); + } + if (other.hasRemainderStream()) { + mergeRemainderStream(other.getRemainderStream()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private com.google.cloud.bigquery.storage.v1beta2.ReadStream primaryStream_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadStream, + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder> + primaryStreamBuilder_; + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1; + * + * @return Whether the primaryStream field is set. + */ + public boolean hasPrimaryStream() { + return primaryStreamBuilder_ != null || primaryStream_ != null; + } + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1; + * + * @return The primaryStream. + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadStream getPrimaryStream() { + if (primaryStreamBuilder_ == null) { + return primaryStream_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadStream.getDefaultInstance() + : primaryStream_; + } else { + return primaryStreamBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1; + */ + public Builder setPrimaryStream(com.google.cloud.bigquery.storage.v1beta2.ReadStream value) { + if (primaryStreamBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + primaryStream_ = value; + onChanged(); + } else { + primaryStreamBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1; + */ + public Builder setPrimaryStream( + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder builderForValue) { + if (primaryStreamBuilder_ == null) { + primaryStream_ = builderForValue.build(); + onChanged(); + } else { + primaryStreamBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1; + */ + public Builder mergePrimaryStream(com.google.cloud.bigquery.storage.v1beta2.ReadStream value) { + if (primaryStreamBuilder_ == null) { + if (primaryStream_ != null) { + primaryStream_ = + com.google.cloud.bigquery.storage.v1beta2.ReadStream.newBuilder(primaryStream_) + .mergeFrom(value) + .buildPartial(); + } else { + primaryStream_ = value; + } + onChanged(); + } else { + primaryStreamBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1; + */ + public Builder clearPrimaryStream() { + if (primaryStreamBuilder_ == null) { + primaryStream_ = null; + onChanged(); + } else { + primaryStream_ = null; + primaryStreamBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1; + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder getPrimaryStreamBuilder() { + + onChanged(); + return getPrimaryStreamFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1; + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder + getPrimaryStreamOrBuilder() { + if (primaryStreamBuilder_ != null) { + return primaryStreamBuilder_.getMessageOrBuilder(); + } else { + return primaryStream_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadStream.getDefaultInstance() + : primaryStream_; + } + } + /** + * + * + *
+     * Primary stream, which contains the beginning portion of
+     * |original_stream|. An empty value indicates that the original stream can no
+     * longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadStream, + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder> + getPrimaryStreamFieldBuilder() { + if (primaryStreamBuilder_ == null) { + primaryStreamBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadStream, + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder>( + getPrimaryStream(), getParentForChildren(), isClean()); + primaryStream_ = null; + } + return primaryStreamBuilder_; + } + + private com.google.cloud.bigquery.storage.v1beta2.ReadStream remainderStream_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadStream, + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder> + remainderStreamBuilder_; + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2; + * + * @return Whether the remainderStream field is set. + */ + public boolean hasRemainderStream() { + return remainderStreamBuilder_ != null || remainderStream_ != null; + } + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2; + * + * @return The remainderStream. + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadStream getRemainderStream() { + if (remainderStreamBuilder_ == null) { + return remainderStream_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadStream.getDefaultInstance() + : remainderStream_; + } else { + return remainderStreamBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2; + */ + public Builder setRemainderStream(com.google.cloud.bigquery.storage.v1beta2.ReadStream value) { + if (remainderStreamBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + remainderStream_ = value; + onChanged(); + } else { + remainderStreamBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2; + */ + public Builder setRemainderStream( + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder builderForValue) { + if (remainderStreamBuilder_ == null) { + remainderStream_ = builderForValue.build(); + onChanged(); + } else { + remainderStreamBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2; + */ + public Builder mergeRemainderStream( + com.google.cloud.bigquery.storage.v1beta2.ReadStream value) { + if (remainderStreamBuilder_ == null) { + if (remainderStream_ != null) { + remainderStream_ = + com.google.cloud.bigquery.storage.v1beta2.ReadStream.newBuilder(remainderStream_) + .mergeFrom(value) + .buildPartial(); + } else { + remainderStream_ = value; + } + onChanged(); + } else { + remainderStreamBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2; + */ + public Builder clearRemainderStream() { + if (remainderStreamBuilder_ == null) { + remainderStream_ = null; + onChanged(); + } else { + remainderStream_ = null; + remainderStreamBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2; + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder + getRemainderStreamBuilder() { + + onChanged(); + return getRemainderStreamFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2; + */ + public com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder + getRemainderStreamOrBuilder() { + if (remainderStreamBuilder_ != null) { + return remainderStreamBuilder_.getMessageOrBuilder(); + } else { + return remainderStream_ == null + ? com.google.cloud.bigquery.storage.v1beta2.ReadStream.getDefaultInstance() + : remainderStream_; + } + } + /** + * + * + *
+     * Remainder stream, which contains the tail of |original_stream|. An empty
+     * value indicates that the original stream can no longer be split.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadStream, + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder> + getRemainderStreamFieldBuilder() { + if (remainderStreamBuilder_ == null) { + remainderStreamBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ReadStream, + com.google.cloud.bigquery.storage.v1beta2.ReadStream.Builder, + com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder>( + getRemainderStream(), getParentForChildren(), isClean()); + remainderStream_ = null; + } + return remainderStreamBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse) + private static final com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public SplitReadStreamResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SplitReadStreamResponse(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamResponseOrBuilder.java new file mode 100644 index 0000000000..adb37ece00 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamResponseOrBuilder.java @@ -0,0 +1,104 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
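// --- Illustrative sketch (not part of the generated sources in this patch) ---
// The SplitReadStreamResponse accessors above expose an optional primary and remainder
// stream; if either is unset, the original stream could not be split further. The helper
// below is a hypothetical caller-side check. It assumes the response came from the
// v1beta2 SplitReadStream RPC (for example via BaseBigQueryReadClient.splitReadStream)
// and that streams are identified by ReadStream.getName().
import com.google.cloud.bigquery.storage.v1beta2.ReadStream;
import com.google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse;

final class SplitResultExample {
  /** Returns the stream names to read after a split attempt, falling back to the original. */
  static java.util.List<String> streamsToRead(
      String originalStreamName, SplitReadStreamResponse response) {
    // An empty primary/remainder value means the original stream can no longer be split.
    if (!response.hasPrimaryStream() || !response.hasRemainderStream()) {
      return java.util.Collections.singletonList(originalStreamName);
    }
    ReadStream primary = response.getPrimaryStream();     // beginning portion of the original
    ReadStream remainder = response.getRemainderStream(); // tail of the original
    return java.util.Arrays.asList(primary.getName(), remainder.getName());
  }
}
// --- End of illustrative sketch ---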
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +public interface SplitReadStreamResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.SplitReadStreamResponse) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Primary stream, which contains the beginning portion of
+   * |original_stream|. An empty value indicates that the original stream can no
+   * longer be split.
+   * 
+   *
+   * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1;
+   *
+   * @return Whether the primaryStream field is set.
+   */
+  boolean hasPrimaryStream();
+  /**
+   *
+   *
+   *
+   * Primary stream, which contains the beginning portion of
+   * |original_stream|. An empty value indicates that the original stream can no
+   * longer be split.
+   * 
+   *
+   * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1;
+   *
+   * @return The primaryStream.
+   */
+  com.google.cloud.bigquery.storage.v1beta2.ReadStream getPrimaryStream();
+  /**
+   *
+   *
+   *
+   * Primary stream, which contains the beginning portion of
+   * |original_stream|. An empty value indicates that the original stream can no
+   * longer be split.
+   * 
+   *
+   * .google.cloud.bigquery.storage.v1beta2.ReadStream primary_stream = 1;
+   */
+  com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder getPrimaryStreamOrBuilder();
+
+  /**
+   *
+   *
+   *
+   * Remainder stream, which contains the tail of |original_stream|. An empty
+   * value indicates that the original stream can no longer be split.
+   * 
+   *
+   * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2;
+   *
+   * @return Whether the remainderStream field is set.
+   */
+  boolean hasRemainderStream();
+  /**
+   *
+   *
+   *
+   * Remainder stream, which contains the tail of |original_stream|. An empty
+   * value indicates that the original stream can no longer be split.
+   * 
+   *
+   * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2;
+   *
+   * @return The remainderStream.
+   */
+  com.google.cloud.bigquery.storage.v1beta2.ReadStream getRemainderStream();
+  /**
+   *
+   *
+   *
+   * Remainder stream, which contains the tail of |original_stream|. An empty
+   * value indicates that the original stream can no longer be split.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.ReadStream remainder_stream = 2; + */ + com.google.cloud.bigquery.storage.v1beta2.ReadStreamOrBuilder getRemainderStreamOrBuilder(); +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageProto.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageProto.java new file mode 100644 index 0000000000..4e8dd74a5a --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageProto.java @@ -0,0 +1,233 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +public final class StorageProto { + private StorageProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_CreateReadSessionRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_CreateReadSessionRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_ThrottleState_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_ThrottleState_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_Progress_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_Progress_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + 
internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsResponse_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamResponse_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n3google/cloud/bigquery/storage/v1beta2/" + + "storage.proto\022%google.cloud.bigquery.sto" + + "rage.v1beta2\032\034google/api/annotations.pro" + + "to\032\027google/api/client.proto\032\037google/api/" + + "field_behavior.proto\032\031google/api/resourc" + + "e.proto\0321google/cloud/bigquery/storage/v" + + "1beta2/arrow.proto\0320google/cloud/bigquer" + + "y/storage/v1beta2/avro.proto\0322google/clo" + + "ud/bigquery/storage/v1beta2/stream.proto" + + "\"\310\001\n\030CreateReadSessionRequest\022C\n\006parent\030" + + "\001 \001(\tB3\340A\002\372A-\n+cloudresourcemanager.goog" + + "leapis.com/Project\022M\n\014read_session\030\002 \001(\013" + + "22.google.cloud.bigquery.storage.v1beta2" + + ".ReadSessionB\003\340A\002\022\030\n\020max_stream_count\030\003 " + + "\001(\005\";\n\017ReadRowsRequest\022\030\n\013read_stream\030\001 " + + "\001(\tB\003\340A\002\022\016\n\006offset\030\002 \001(\003\")\n\rThrottleStat" + + "e\022\030\n\020throttle_percent\030\001 \001(\005\"\234\001\n\013StreamSt" + + "ats\022M\n\010progress\030\002 \001(\0132;.google.cloud.big" + + "query.storage.v1beta2.StreamStats.Progre" + + "ss\032>\n\010Progress\022\031\n\021at_response_start\030\001 \001(" + + "\001\022\027\n\017at_response_end\030\002 \001(\001\"\333\002\n\020ReadRowsR" + + "esponse\022D\n\tavro_rows\030\003 \001(\0132/.google.clou" + + "d.bigquery.storage.v1beta2.AvroRowsH\000\022U\n" + + "\022arrow_record_batch\030\004 \001(\01327.google.cloud" + + ".bigquery.storage.v1beta2.ArrowRecordBat" + + "chH\000\022\021\n\trow_count\030\006 \001(\003\022A\n\005stats\030\002 \001(\01322" + + ".google.cloud.bigquery.storage.v1beta2.S" + + "treamStats\022L\n\016throttle_state\030\005 \001(\01324.goo" + + "gle.cloud.bigquery.storage.v1beta2.Throt" + + "tleStateB\006\n\004rows\"k\n\026SplitReadStreamReque" + + "st\022?\n\004name\030\001 \001(\tB1\340A\002\372A+\n)bigquerystorag" + + "e.googleapis.com/ReadStream\022\020\n\010fraction\030" + + "\002 \001(\001\"\261\001\n\027SplitReadStreamResponse\022I\n\016pri" + + "mary_stream\030\001 \001(\01321.google.cloud.bigquer" + + "y.storage.v1beta2.ReadStream\022K\n\020remainde" + + "r_stream\030\002 \001(\01321.google.cloud.bigquery.s" + + "torage.v1beta2.ReadStream2\363\006\n\014BigQueryRe" + + "ad\022\370\001\n\021CreateReadSession\022?.google.cloud." 
+ + "bigquery.storage.v1beta2.CreateReadSessi" + + "onRequest\0322.google.cloud.bigquery.storag" + + "e.v1beta2.ReadSession\"n\202\323\344\223\002A\".google.cloud.bigquery.storage.v1beta" + + "2.SplitReadStreamResponse\"C\202\323\344\223\002=\022;/v1be" + + "ta2/{name=projects/*/locations/*/session" + + "s/*/streams/*}\032\256\001\312A\036bigquerystorage.goog" + + "leapis.com\322A\211\001https://www.googleapis.com" + + "/auth/bigquery,https://www.googleapis.co" + + "m/auth/bigquery.readonly,https://www.goo" + + "gleapis.com/auth/cloud-platformB\211\001\n)com." + + "google.cloud.bigquery.storage.v1beta2B\014S" + + "torageProtoP\001ZLgoogle.golang.org/genprot" + + "o/googleapis/cloud/bigquery/storage/v1be" + + "ta2;storageb\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.AnnotationsProto.getDescriptor(), + com.google.api.ClientProto.getDescriptor(), + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.api.ResourceProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1beta2.ArrowProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1beta2.AvroProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1beta2.StreamProto.getDescriptor(), + }); + internal_static_google_cloud_bigquery_storage_v1beta2_CreateReadSessionRequest_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1beta2_CreateReadSessionRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_CreateReadSessionRequest_descriptor, + new java.lang.String[] { + "Parent", "ReadSession", "MaxStreamCount", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsRequest_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsRequest_descriptor, + new java.lang.String[] { + "ReadStream", "Offset", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_ThrottleState_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_google_cloud_bigquery_storage_v1beta2_ThrottleState_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_ThrottleState_descriptor, + new java.lang.String[] { + "ThrottlePercent", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_descriptor, + new java.lang.String[] { + "Progress", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_Progress_descriptor = + internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_descriptor + .getNestedTypes() + .get(0); + internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_Progress_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_Progress_descriptor, + new java.lang.String[] { + 
"AtResponseStart", "AtResponseEnd", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsResponse_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsResponse_descriptor, + new java.lang.String[] { + "AvroRows", "ArrowRecordBatch", "RowCount", "Stats", "ThrottleState", "Rows", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamRequest_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamRequest_descriptor, + new java.lang.String[] { + "Name", "Fraction", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamResponse_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamResponse_descriptor, + new java.lang.String[] { + "PrimaryStream", "RemainderStream", + }); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.ClientProto.defaultHost); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + registry.add(com.google.api.AnnotationsProto.http); + registry.add(com.google.api.ClientProto.methodSignature); + registry.add(com.google.api.ClientProto.oauthScopes); + registry.add(com.google.api.ResourceProto.resourceReference); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + com.google.api.AnnotationsProto.getDescriptor(); + com.google.api.ClientProto.getDescriptor(); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.api.ResourceProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1beta2.ArrowProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1beta2.AvroProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1beta2.StreamProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamProto.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamProto.java new file mode 100644 index 0000000000..c50e3c3cde --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamProto.java @@ -0,0 +1,163 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/stream.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +public final class StreamProto { + private StreamProto() {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} + + public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { + registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); + } + + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableModifiers_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableModifiers_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableReadOptions_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableReadOptions_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_cloud_bigquery_storage_v1beta2_ReadStream_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_cloud_bigquery_storage_v1beta2_ReadStream_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + return descriptor; + } + + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; + + static { + java.lang.String[] descriptorData = { + "\n2google/cloud/bigquery/storage/v1beta2/" + + "stream.proto\022%google.cloud.bigquery.stor" + + "age.v1beta2\032\037google/api/field_behavior.p" + + "roto\032\031google/api/resource.proto\0321google/" + + "cloud/bigquery/storage/v1beta2/arrow.pro" + + "to\0320google/cloud/bigquery/storage/v1beta" + + "2/avro.proto\032\037google/protobuf/timestamp." + + "proto\"\336\006\n\013ReadSession\022\021\n\004name\030\001 \001(\tB\003\340A\003" + + "\0224\n\013expire_time\030\002 \001(\0132\032.google.protobuf." 
+ + "TimestampB\003\340A\003\022K\n\013data_format\030\003 \001(\01621.go" + + "ogle.cloud.bigquery.storage.v1beta2.Data" + + "FormatB\003\340A\005\022M\n\013avro_schema\030\004 \001(\01321.googl" + + "e.cloud.bigquery.storage.v1beta2.AvroSch" + + "emaB\003\340A\003H\000\022O\n\014arrow_schema\030\005 \001(\01322.googl" + + "e.cloud.bigquery.storage.v1beta2.ArrowSc" + + "hemaB\003\340A\003H\000\022\r\n\005table\030\006 \001(\t\022_\n\017table_modi" + + "fiers\030\007 \001(\0132A.google.cloud.bigquery.stor" + + "age.v1beta2.ReadSession.TableModifiersB\003" + + "\340A\001\022^\n\014read_options\030\010 \001(\0132C.google.cloud" + + ".bigquery.storage.v1beta2.ReadSession.Ta" + + "bleReadOptionsB\003\340A\001\022G\n\007streams\030\n \003(\01321.g" + + "oogle.cloud.bigquery.storage.v1beta2.Rea" + + "dStreamB\003\340A\003\032C\n\016TableModifiers\0221\n\rsnapsh" + + "ot_time\030\001 \001(\0132\032.google.protobuf.Timestam" + + "p\032D\n\020TableReadOptions\022\027\n\017selected_fields" + + "\030\001 \003(\t\022\027\n\017row_restriction\030\002 \001(\t:k\352Ah\n*bi" + + "gquerystorage.googleapis.com/ReadSession" + + "\022:projects/{project}/locations/{location" + + "}/sessions/{session}B\010\n\006schema\"\234\001\n\nReadS" + + "tream\022\021\n\004name\030\001 \001(\tB\003\340A\003:{\352Ax\n)bigquerys" + + "torage.googleapis.com/ReadStream\022Kprojec" + + "ts/{project}/locations/{location}/sessio" + + "ns/{session}/streams/{stream}*>\n\nDataFor" + + "mat\022\033\n\027DATA_FORMAT_UNSPECIFIED\020\000\022\010\n\004AVRO" + + "\020\001\022\t\n\005ARROW\020\002B\210\001\n)com.google.cloud.bigqu" + + "ery.storage.v1beta2B\013StreamProtoP\001ZLgoog" + + "le.golang.org/genproto/googleapis/cloud/" + + "bigquery/storage/v1beta2;storageb\006proto3" + }; + descriptor = + com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( + descriptorData, + new com.google.protobuf.Descriptors.FileDescriptor[] { + com.google.api.FieldBehaviorProto.getDescriptor(), + com.google.api.ResourceProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1beta2.ArrowProto.getDescriptor(), + com.google.cloud.bigquery.storage.v1beta2.AvroProto.getDescriptor(), + com.google.protobuf.TimestampProto.getDescriptor(), + }); + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_descriptor, + new java.lang.String[] { + "Name", + "ExpireTime", + "DataFormat", + "AvroSchema", + "ArrowSchema", + "Table", + "TableModifiers", + "ReadOptions", + "Streams", + "Schema", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableModifiers_descriptor = + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_descriptor + .getNestedTypes() + .get(0); + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableModifiers_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableModifiers_descriptor, + new java.lang.String[] { + "SnapshotTime", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableReadOptions_descriptor = + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_descriptor + .getNestedTypes() + .get(1); + 
internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableReadOptions_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_ReadSession_TableReadOptions_descriptor, + new java.lang.String[] { + "SelectedFields", "RowRestriction", + }); + internal_static_google_cloud_bigquery_storage_v1beta2_ReadStream_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_google_cloud_bigquery_storage_v1beta2_ReadStream_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_cloud_bigquery_storage_v1beta2_ReadStream_descriptor, + new java.lang.String[] { + "Name", + }); + com.google.protobuf.ExtensionRegistry registry = + com.google.protobuf.ExtensionRegistry.newInstance(); + registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); + registry.add(com.google.api.ResourceProto.resource); + com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( + descriptor, registry); + com.google.api.FieldBehaviorProto.getDescriptor(); + com.google.api.ResourceProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1beta2.ArrowProto.getDescriptor(); + com.google.cloud.bigquery.storage.v1beta2.AvroProto.getDescriptor(); + com.google.protobuf.TimestampProto.getDescriptor(); + } + + // @@protoc_insertion_point(outer_class_scope) +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamStats.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamStats.java new file mode 100644 index 0000000000..2b84ba4cfe --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamStats.java @@ -0,0 +1,1438 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Estimated stream statistics for a given Stream.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.StreamStats} + */ +public final class StreamStats extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.StreamStats) + StreamStatsOrBuilder { + private static final long serialVersionUID = 0L; + // Use StreamStats.newBuilder() to construct. + private StreamStats(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private StreamStats() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new StreamStats(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private StreamStats( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 18: + { + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.Builder subBuilder = + null; + if (progress_ != null) { + subBuilder = progress_.toBuilder(); + } + progress_ = + input.readMessage( + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(progress_); + progress_ = subBuilder.buildPartial(); + } + + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.StreamStats.class, + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Builder.class); + } + + public interface ProgressOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.StreamStats.Progress) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * The fraction of rows assigned to the stream that have been processed by
+     * the server so far, not including the rows in the current response
+     * message.
+     * This value, along with `at_response_end`, can be used to interpolate
+     * the progress made as the rows in the message are being processed using
+     * the following formula: `at_response_start + (at_response_end -
+     * at_response_start) * rows_processed_from_response / rows_in_response`.
+     * Note that if a filter is provided, the `at_response_end` value of the
+     * previous response may not necessarily be equal to the
+     * `at_response_start` value of the current response.
+     * 
+ * + * double at_response_start = 1; + * + * @return The atResponseStart. + */ + double getAtResponseStart(); + + /** + * + * + *
+     * Similar to `at_response_start`, except that this value includes the
+     * rows in the current response.
+     * 
+ * + * double at_response_end = 2; + * + * @return The atResponseEnd. + */ + double getAtResponseEnd(); + } + /** Protobuf type {@code google.cloud.bigquery.storage.v1beta2.StreamStats.Progress} */ + public static final class Progress extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.StreamStats.Progress) + ProgressOrBuilder { + private static final long serialVersionUID = 0L; + // Use Progress.newBuilder() to construct. + private Progress(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private Progress() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new Progress(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private Progress( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 9: + { + atResponseStart_ = input.readDouble(); + break; + } + case 17: + { + atResponseEnd_ = input.readDouble(); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_Progress_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_Progress_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.class, + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.Builder.class); + } + + public static final int AT_RESPONSE_START_FIELD_NUMBER = 1; + private double atResponseStart_; + /** + * + * + *
+     * The fraction of rows assigned to the stream that have been processed by
+     * the server so far, not including the rows in the current response
+     * message.
+     * This value, along with `at_response_end`, can be used to interpolate
+     * the progress made as the rows in the message are being processed using
+     * the following formula: `at_response_start + (at_response_end -
+     * at_response_start) * rows_processed_from_response / rows_in_response`.
+     * Note that if a filter is provided, the `at_response_end` value of the
+     * previous response may not necessarily be equal to the
+     * `at_response_start` value of the current response.
+     * 
+ * + * double at_response_start = 1; + * + * @return The atResponseStart. + */ + public double getAtResponseStart() { + return atResponseStart_; + } + + public static final int AT_RESPONSE_END_FIELD_NUMBER = 2; + private double atResponseEnd_; + /** + * + * + *
+     * Similar to `at_response_start`, except that this value includes the
+     * rows in the current response.
+     * 
+ * + * double at_response_end = 2; + * + * @return The atResponseEnd. + */ + public double getAtResponseEnd() { + return atResponseEnd_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (atResponseStart_ != 0D) { + output.writeDouble(1, atResponseStart_); + } + if (atResponseEnd_ != 0D) { + output.writeDouble(2, atResponseEnd_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (atResponseStart_ != 0D) { + size += com.google.protobuf.CodedOutputStream.computeDoubleSize(1, atResponseStart_); + } + if (atResponseEnd_ != 0D) { + size += com.google.protobuf.CodedOutputStream.computeDoubleSize(2, atResponseEnd_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress other = + (com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress) obj; + + if (java.lang.Double.doubleToLongBits(getAtResponseStart()) + != java.lang.Double.doubleToLongBits(other.getAtResponseStart())) return false; + if (java.lang.Double.doubleToLongBits(getAtResponseEnd()) + != java.lang.Double.doubleToLongBits(other.getAtResponseEnd())) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + AT_RESPONSE_START_FIELD_NUMBER; + hash = + (53 * hash) + + com.google.protobuf.Internal.hashLong( + java.lang.Double.doubleToLongBits(getAtResponseStart())); + hash = (37 * hash) + AT_RESPONSE_END_FIELD_NUMBER; + hash = + (53 * hash) + + com.google.protobuf.Internal.hashLong( + java.lang.Double.doubleToLongBits(getAtResponseEnd())); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** Protobuf type {@code google.cloud.bigquery.storage.v1beta2.StreamStats.Progress} */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.StreamStats.Progress) + com.google.cloud.bigquery.storage.v1beta2.StreamStats.ProgressOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_Progress_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_Progress_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.class, + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + atResponseStart_ = 0D; + + atResponseEnd_ = 0D; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_Progress_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress + getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress build() { + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress result = + new com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress(this); + result.atResponseStart_ = atResponseStart_; + result.atResponseEnd_ = atResponseEnd_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder 
clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress other) { + if (other + == com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.getDefaultInstance()) + return this; + if (other.getAtResponseStart() != 0D) { + setAtResponseStart(other.getAtResponseStart()); + } + if (other.getAtResponseEnd() != 0D) { + setAtResponseEnd(other.getAtResponseEnd()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress) + e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private double atResponseStart_; + /** + * + * + *
+       * The fraction of rows assigned to the stream that have been processed by
+       * the server so far, not including the rows in the current response
+       * message.
+       * This value, along with `at_response_end`, can be used to interpolate
+       * the progress made as the rows in the message are being processed using
+       * the following formula: `at_response_start + (at_response_end -
+       * at_response_start) * rows_processed_from_response / rows_in_response`.
+       * Note that if a filter is provided, the `at_response_end` value of the
+       * previous response may not necessarily be equal to the
+       * `at_response_start` value of the current response.
+       * 
+ * + * double at_response_start = 1; + * + * @return The atResponseStart. + */ + public double getAtResponseStart() { + return atResponseStart_; + } + /** + * + * + *
+       * The fraction of rows assigned to the stream that have been processed by
+       * the server so far, not including the rows in the current response
+       * message.
+       * This value, along with `at_response_end`, can be used to interpolate
+       * the progress made as the rows in the message are being processed using
+       * the following formula: `at_response_start + (at_response_end -
+       * at_response_start) * rows_processed_from_response / rows_in_response`.
+       * Note that if a filter is provided, the `at_response_end` value of the
+       * previous response may not necessarily be equal to the
+       * `at_response_start` value of the current response.
+       * 
+ * + * double at_response_start = 1; + * + * @param value The atResponseStart to set. + * @return This builder for chaining. + */ + public Builder setAtResponseStart(double value) { + + atResponseStart_ = value; + onChanged(); + return this; + } + /** + * + * + *
+       * The fraction of rows assigned to the stream that have been processed by
+       * the server so far, not including the rows in the current response
+       * message.
+       * This value, along with `at_response_end`, can be used to interpolate
+       * the progress made as the rows in the message are being processed using
+       * the following formula: `at_response_start + (at_response_end -
+       * at_response_start) * rows_processed_from_response / rows_in_response`.
+       * Note that if a filter is provided, the `at_response_end` value of the
+       * previous response may not necessarily be equal to the
+       * `at_response_start` value of the current response.
+       * 
+ * + * double at_response_start = 1; + * + * @return This builder for chaining. + */ + public Builder clearAtResponseStart() { + + atResponseStart_ = 0D; + onChanged(); + return this; + } + + private double atResponseEnd_; + /** + * + * + *
+       * Similar to `at_response_start`, except that this value includes the
+       * rows in the current response.
+       * 
+ * + * double at_response_end = 2; + * + * @return The atResponseEnd. + */ + public double getAtResponseEnd() { + return atResponseEnd_; + } + /** + * + * + *
+       * Similar to `at_response_start`, except that this value includes the
+       * rows in the current response.
+       * 
+ * + * double at_response_end = 2; + * + * @param value The atResponseEnd to set. + * @return This builder for chaining. + */ + public Builder setAtResponseEnd(double value) { + + atResponseEnd_ = value; + onChanged(); + return this; + } + /** + * + * + *
+       * Similar to `at_response_start`, except that this value includes the
+       * rows in the current response.
+       * 
+ * + * double at_response_end = 2; + * + * @return This builder for chaining. + */ + public Builder clearAtResponseEnd() { + + atResponseEnd_ = 0D; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.StreamStats.Progress) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.StreamStats.Progress) + private static final com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public Progress parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Progress(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + + public static final int PROGRESS_FIELD_NUMBER = 2; + private com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress_; + /** + * + * + *
+   * Represents the progress of the current stream.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2; + * + * @return Whether the progress field is set. + */ + public boolean hasProgress() { + return progress_ != null; + } + /** + * + * + *
+   * Represents the progress of the current stream.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2; + * + * @return The progress. + */ + public com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress getProgress() { + return progress_ == null + ? com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.getDefaultInstance() + : progress_; + } + /** + * + * + *
+   * Represents the progress of the current stream.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2; + */ + public com.google.cloud.bigquery.storage.v1beta2.StreamStats.ProgressOrBuilder + getProgressOrBuilder() { + return getProgress(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (progress_ != null) { + output.writeMessage(2, getProgress()); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (progress_ != null) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getProgress()); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.StreamStats)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.StreamStats other = + (com.google.cloud.bigquery.storage.v1beta2.StreamStats) obj; + + if (hasProgress() != other.hasProgress()) return false; + if (hasProgress()) { + if (!getProgress().equals(other.getProgress())) return false; + } + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasProgress()) { + hash = (37 * hash) + PROGRESS_FIELD_NUMBER; + hash = (53 * hash) + getProgress().hashCode(); + } + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } 
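+  // Illustrative sketch, not part of the generated API: shows how a caller could
+  // apply the interpolation formula documented on `at_response_start` to estimate
+  // overall stream progress while consuming rows from a single ReadRowsResponse.
+  // `rowsProcessed` and `rowsInResponse` are hypothetical counts tracked by the
+  // caller; they are not fields of this message.
+  private static double interpolateProgress(
+      com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress,
+      long rowsProcessed,
+      long rowsInResponse) {
+    return progress.getAtResponseStart()
+        + (progress.getAtResponseEnd() - progress.getAtResponseStart())
+            * rowsProcessed
+            / (double) rowsInResponse;
+  }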
+ + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.StreamStats prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Estimated stream statistics for a given Stream.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.StreamStats} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.StreamStats) + com.google.cloud.bigquery.storage.v1beta2.StreamStatsOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.StreamStats.class, + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.StreamStats.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + if (progressBuilder_ == null) { + progress_ = null; + } else { + progress_ = null; + progressBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_StreamStats_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.StreamStats getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.StreamStats.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.StreamStats build() { + com.google.cloud.bigquery.storage.v1beta2.StreamStats result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.StreamStats buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.StreamStats result = + new com.google.cloud.bigquery.storage.v1beta2.StreamStats(this); + if (progressBuilder_ == null) { + result.progress_ = progress_; + } else { + result.progress_ = progressBuilder_.build(); + } + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, 
index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.StreamStats) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.StreamStats) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.StreamStats other) { + if (other == com.google.cloud.bigquery.storage.v1beta2.StreamStats.getDefaultInstance()) + return this; + if (other.hasProgress()) { + mergeProgress(other.getProgress()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1beta2.StreamStats parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1beta2.StreamStats) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress, + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.Builder, + com.google.cloud.bigquery.storage.v1beta2.StreamStats.ProgressOrBuilder> + progressBuilder_; + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2; + * + * @return Whether the progress field is set. + */ + public boolean hasProgress() { + return progressBuilder_ != null || progress_ != null; + } + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2; + * + * @return The progress. + */ + public com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress getProgress() { + if (progressBuilder_ == null) { + return progress_ == null + ? com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.getDefaultInstance() + : progress_; + } else { + return progressBuilder_.getMessage(); + } + } + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2; + */ + public Builder setProgress( + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress value) { + if (progressBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + progress_ = value; + onChanged(); + } else { + progressBuilder_.setMessage(value); + } + + return this; + } + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2; + */ + public Builder setProgress( + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.Builder builderForValue) { + if (progressBuilder_ == null) { + progress_ = builderForValue.build(); + onChanged(); + } else { + progressBuilder_.setMessage(builderForValue.build()); + } + + return this; + } + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2; + */ + public Builder mergeProgress( + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress value) { + if (progressBuilder_ == null) { + if (progress_ != null) { + progress_ = + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.newBuilder(progress_) + .mergeFrom(value) + .buildPartial(); + } else { + progress_ = value; + } + onChanged(); + } else { + progressBuilder_.mergeFrom(value); + } + + return this; + } + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2; + */ + public Builder clearProgress() { + if (progressBuilder_ == null) { + progress_ = null; + onChanged(); + } else { + progress_ = null; + progressBuilder_ = null; + } + + return this; + } + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2; + */ + public com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.Builder + getProgressBuilder() { + + onChanged(); + return getProgressFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2; + */ + public com.google.cloud.bigquery.storage.v1beta2.StreamStats.ProgressOrBuilder + getProgressOrBuilder() { + if (progressBuilder_ != null) { + return progressBuilder_.getMessageOrBuilder(); + } else { + return progress_ == null + ? com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.getDefaultInstance() + : progress_; + } + } + /** + * + * + *
+     * Represents the progress of the current stream.
+     * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress, + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.Builder, + com.google.cloud.bigquery.storage.v1beta2.StreamStats.ProgressOrBuilder> + getProgressFieldBuilder() { + if (progressBuilder_ == null) { + progressBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress, + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress.Builder, + com.google.cloud.bigquery.storage.v1beta2.StreamStats.ProgressOrBuilder>( + getProgress(), getParentForChildren(), isClean()); + progress_ = null; + } + return progressBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.StreamStats) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.StreamStats) + private static final com.google.cloud.bigquery.storage.v1beta2.StreamStats DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.StreamStats(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.StreamStats getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StreamStats parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new StreamStats(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.StreamStats getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamStatsOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamStatsOrBuilder.java new file mode 100644 index 0000000000..ed9f32db50 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamStatsOrBuilder.java @@ -0,0 +1,60 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. 
DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +public interface StreamStatsOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.StreamStats) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Represents the progress of the current stream.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2; + * + * @return Whether the progress field is set. + */ + boolean hasProgress(); + /** + * + * + *
+   * Represents the progress of the current stream.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2; + * + * @return The progress. + */ + com.google.cloud.bigquery.storage.v1beta2.StreamStats.Progress getProgress(); + /** + * + * + *
+   * Represents the progress of the current stream.
+   * 
+ * + * .google.cloud.bigquery.storage.v1beta2.StreamStats.Progress progress = 2; + */ + com.google.cloud.bigquery.storage.v1beta2.StreamStats.ProgressOrBuilder getProgressOrBuilder(); +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ThrottleState.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ThrottleState.java new file mode 100644 index 0000000000..1d45a7c2a5 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ThrottleState.java @@ -0,0 +1,549 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +/** + * + * + *
+ * Information on whether the current connection is being throttled.
+ * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ThrottleState} + */ +public final class ThrottleState extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.ThrottleState) + ThrottleStateOrBuilder { + private static final long serialVersionUID = 0L; + // Use ThrottleState.newBuilder() to construct. + private ThrottleState(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private ThrottleState() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new ThrottleState(); + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private ThrottleState( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + throttlePercent_ = input.readInt32(); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ThrottleState_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ThrottleState_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ThrottleState.class, + com.google.cloud.bigquery.storage.v1beta2.ThrottleState.Builder.class); + } + + public static final int THROTTLE_PERCENT_FIELD_NUMBER = 1; + private int throttlePercent_; + /** + * + * + *
+   * How much this connection is being throttled. Zero means no throttling,
+   * 100 means fully throttled.
+   * 
+ * + * int32 throttle_percent = 1; + * + * @return The throttlePercent. + */ + public int getThrottlePercent() { + return throttlePercent_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (throttlePercent_ != 0) { + output.writeInt32(1, throttlePercent_); + } + unknownFields.writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (throttlePercent_ != 0) { + size += com.google.protobuf.CodedOutputStream.computeInt32Size(1, throttlePercent_); + } + size += unknownFields.getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.cloud.bigquery.storage.v1beta2.ThrottleState)) { + return super.equals(obj); + } + com.google.cloud.bigquery.storage.v1beta2.ThrottleState other = + (com.google.cloud.bigquery.storage.v1beta2.ThrottleState) obj; + + if (getThrottlePercent() != other.getThrottlePercent()) return false; + if (!unknownFields.equals(other.unknownFields)) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + THROTTLE_PERCENT_FIELD_NUMBER; + hash = (53 * hash) + getThrottlePercent(); + hash = (29 * hash) + unknownFields.hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.cloud.bigquery.storage.v1beta2.ThrottleState parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ThrottleState parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ThrottleState parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ThrottleState parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ThrottleState parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ThrottleState parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ThrottleState parseFrom( + java.io.InputStream input) throws java.io.IOException { + 
return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ThrottleState parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ThrottleState parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ThrottleState parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ThrottleState parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ThrottleState parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.cloud.bigquery.storage.v1beta2.ThrottleState prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Information on whether the current connection is being throttled.
+   * 
+ * + * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ThrottleState} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.ThrottleState) + com.google.cloud.bigquery.storage.v1beta2.ThrottleStateOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ThrottleState_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ThrottleState_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.bigquery.storage.v1beta2.ThrottleState.class, + com.google.cloud.bigquery.storage.v1beta2.ThrottleState.Builder.class); + } + + // Construct using com.google.cloud.bigquery.storage.v1beta2.ThrottleState.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} + } + + @java.lang.Override + public Builder clear() { + super.clear(); + throttlePercent_ = 0; + + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.cloud.bigquery.storage.v1beta2.StorageProto + .internal_static_google_cloud_bigquery_storage_v1beta2_ThrottleState_descriptor; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ThrottleState getDefaultInstanceForType() { + return com.google.cloud.bigquery.storage.v1beta2.ThrottleState.getDefaultInstance(); + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ThrottleState build() { + com.google.cloud.bigquery.storage.v1beta2.ThrottleState result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ThrottleState buildPartial() { + com.google.cloud.bigquery.storage.v1beta2.ThrottleState result = + new com.google.cloud.bigquery.storage.v1beta2.ThrottleState(this); + result.throttlePercent_ = throttlePercent_; + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, 
java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.cloud.bigquery.storage.v1beta2.ThrottleState) { + return mergeFrom((com.google.cloud.bigquery.storage.v1beta2.ThrottleState) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.ThrottleState other) { + if (other == com.google.cloud.bigquery.storage.v1beta2.ThrottleState.getDefaultInstance()) + return this; + if (other.getThrottlePercent() != 0) { + setThrottlePercent(other.getThrottlePercent()); + } + this.mergeUnknownFields(other.unknownFields); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + com.google.cloud.bigquery.storage.v1beta2.ThrottleState parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = + (com.google.cloud.bigquery.storage.v1beta2.ThrottleState) e.getUnfinishedMessage(); + throw e.unwrapIOException(); + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + + private int throttlePercent_; + /** + * + * + *
+     * How much this connection is being throttled. Zero means no throttling,
+     * 100 means fully throttled.
+     * 
+ * + * int32 throttle_percent = 1; + * + * @return The throttlePercent. + */ + public int getThrottlePercent() { + return throttlePercent_; + } + /** + * + * + *
+     * How much this connection is being throttled. Zero means no throttling,
+     * 100 means fully throttled.
+     * 
+ * + * int32 throttle_percent = 1; + * + * @param value The throttlePercent to set. + * @return This builder for chaining. + */ + public Builder setThrottlePercent(int value) { + + throttlePercent_ = value; + onChanged(); + return this; + } + /** + * + * + *
+     * How much this connection is being throttled. Zero means no throttling,
+     * 100 means fully throttled.
+     * 
+ * + * int32 throttle_percent = 1; + * + * @return This builder for chaining. + */ + public Builder clearThrottlePercent() { + + throttlePercent_ = 0; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1beta2.ThrottleState) + } + + // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1beta2.ThrottleState) + private static final com.google.cloud.bigquery.storage.v1beta2.ThrottleState DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1beta2.ThrottleState(); + } + + public static com.google.cloud.bigquery.storage.v1beta2.ThrottleState getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public ThrottleState parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ThrottleState(input, extensionRegistry); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ThrottleState getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ThrottleStateOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ThrottleStateOrBuilder.java new file mode 100644 index 0000000000..cf416b5ef5 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ThrottleStateOrBuilder.java @@ -0,0 +1,39 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/cloud/bigquery/storage/v1beta2/storage.proto + +package com.google.cloud.bigquery.storage.v1beta2; + +public interface ThrottleStateOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1beta2.ThrottleState) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * How much this connection is being throttled. Zero means no throttling,
+   * 100 means fully throttled.
+   * 
+ * + * int32 throttle_percent = 1; + * + * @return The throttlePercent. + */ + int getThrottlePercent(); +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/arrow.proto b/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/arrow.proto new file mode 100644 index 0000000000..0cca110293 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/arrow.proto @@ -0,0 +1,40 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1beta2; + +option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta2;storage"; +option java_multiple_files = true; +option java_outer_classname = "ArrowProto"; +option java_package = "com.google.cloud.bigquery.storage.v1beta2"; + +// Arrow schema as specified in +// https://arrow.apache.org/docs/python/api/datatypes.html +// and serialized to bytes using IPC: +// https://arrow.apache.org/docs/ipc.html. +// +// See code samples on how this message can be deserialized. +message ArrowSchema { + // IPC serialized Arrow schema. + bytes serialized_schema = 1; +} + +// Arrow RecordBatch. +message ArrowRecordBatch { + // IPC-serialized Arrow RecordBatch. + bytes serialized_record_batch = 1; +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/avro.proto b/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/avro.proto new file mode 100644 index 0000000000..3719469066 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/avro.proto @@ -0,0 +1,36 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1beta2; + +option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta2;storage"; +option java_multiple_files = true; +option java_outer_classname = "AvroProto"; +option java_package = "com.google.cloud.bigquery.storage.v1beta2"; + +// Avro schema. +message AvroSchema { + // Json serialized schema, as described at + // https://avro.apache.org/docs/1.8.1/spec.html. + string schema = 1; +} + +// Avro rows. +message AvroRows { + // Binary serialized rows in a block. 
+ bytes serialized_binary_rows = 1; +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/storage.proto b/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/storage.proto new file mode 100644 index 0000000000..be03525403 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/storage.proto @@ -0,0 +1,227 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1beta2; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/cloud/bigquery/storage/v1beta2/arrow.proto"; +import "google/cloud/bigquery/storage/v1beta2/avro.proto"; +import "google/cloud/bigquery/storage/v1beta2/stream.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta2;storage"; +option java_multiple_files = true; +option java_outer_classname = "StorageProto"; +option java_package = "com.google.cloud.bigquery.storage.v1beta2"; + +// BigQuery Read API. +// +// The Read API can be used to read data from BigQuery. +service BigQueryRead { + option (google.api.default_host) = "bigquerystorage.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/bigquery," + "https://www.googleapis.com/auth/bigquery.readonly," + "https://www.googleapis.com/auth/cloud-platform"; + + // Creates a new read session. A read session divides the contents of a + // BigQuery table into one or more streams, which can then be used to read + // data from the table. The read session also specifies properties of the + // data to be read, such as a list of columns or a push-down filter describing + // the rows to be returned. + // + // A particular row can be read by at most one stream. When the caller has + // reached the end of each stream in the session, then all the data in the + // table has been read. + // + // Data is assigned to each stream such that roughly the same number of + // rows can be read from each stream. Because the server-side unit for + // assigning data is collections of rows, the API does not guarantee that + // each stream will return the same number or rows. Additionally, the + // limits are enforced based on the number of pre-filtered rows, so some + // filters can lead to lopsided assignments. + // + // Read sessions automatically expire 24 hours after they are created and do + // not require manual clean-up by the caller. + rpc CreateReadSession(CreateReadSessionRequest) returns (ReadSession) { + option (google.api.http) = { + post: "/v1beta2/{read_session.table=projects/*/datasets/*/tables/*}" + body: "*" + }; + option (google.api.method_signature) = + "parent,read_session,max_stream_count"; + } + + // Reads rows from the stream in the format prescribed by the ReadSession. 
+ // Each response contains one or more table rows, up to a maximum of 100 MiB + // per response; read requests which attempt to read individual rows larger + // than 100 MiB will fail. + // + // Each request also returns a set of stream statistics reflecting the current + // state of the stream. + rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) { + option (google.api.http) = { + get: "/v1beta2/{read_stream=projects/*/locations/*/sessions/*/streams/*}" + }; + option (google.api.method_signature) = "read_stream,offset"; + } + + // Splits a given `ReadStream` into two `ReadStream` objects. These + // `ReadStream` objects are referred to as the primary and the residual + // streams of the split. The original `ReadStream` can still be read from in + // the same manner as before. Both of the returned `ReadStream` objects can + // also be read from, and the rows returned by both child streams will be + // the same as the rows read from the original stream. + // + // Moreover, the two child streams will be allocated back-to-back in the + // original `ReadStream`. Concretely, it is guaranteed that for streams + // original, primary, and residual, that original[0-j] = primary[0-j] and + // original[j-n] = residual[0-m] once the streams have been read to + // completion. + rpc SplitReadStream(SplitReadStreamRequest) + returns (SplitReadStreamResponse) { + option (google.api.http) = { + get: "/v1beta2/{name=projects/*/locations/*/sessions/*/streams/*}" + }; + } +} + +// Request message for `CreateReadSession`. +message CreateReadSessionRequest { + // Required. The request project that owns the session, in the form of + // `projects/{project_id}`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "cloudresourcemanager.googleapis.com/Project" + } + ]; + + // Required. Session to be created. + ReadSession read_session = 2 [(google.api.field_behavior) = REQUIRED]; + + // Max initial number of streams. If unset or zero, the server will + // provide a value of streams so as to produce reasonable throughput. Must be + // non-negative. The number of streams may be lower than the requested number, + // depending on the amount parallelism that is reasonable for the table. Error + // will be returned if the max count is greater than the current system + // max limit of 1,000. + // + // Streams must be read starting from offset 0. + int32 max_stream_count = 3; +} + +// Request message for `ReadRows`. +message ReadRowsRequest { + // Required. Stream to read rows from. + string read_stream = 1 [(google.api.field_behavior) = REQUIRED]; + + // The offset requested must be less than the last row read from Read. + // Requesting a larger offset is undefined. If not specified, start reading + // from offset zero. + int64 offset = 2; +} + +// Information on if the current connection is being throttled. +message ThrottleState { + // How much this connection is being throttled. Zero means no throttling, + // 100 means fully throttled. + int32 throttle_percent = 1; +} + +// Estimated stream statistics for a given Stream. +message StreamStats { + message Progress { + // The fraction of rows assigned to the stream that have been processed by + // the server so far, not including the rows in the current response + // message. 
+ // + // This value, along with `at_response_end`, can be used to interpolate + // the progress made as the rows in the message are being processed using + // the following formula: `at_response_start + (at_response_end - + // at_response_start) * rows_processed_from_response / rows_in_response`. + // + // Note that if a filter is provided, the `at_response_end` value of the + // previous response may not necessarily be equal to the + // `at_response_start` value of the current response. + double at_response_start = 1; + + // Similar to `at_response_start`, except that this value includes the + // rows in the current response. + double at_response_end = 2; + } + + // Represents the progress of the current stream. + Progress progress = 2; +} + +// Response from calling `ReadRows` may include row data, progress and +// throttling information. +message ReadRowsResponse { + // Row data is returned in format specified during session creation. + oneof rows { + // Serialized row data in AVRO format. + AvroRows avro_rows = 3; + + // Serialized row data in Arrow RecordBatch format. + ArrowRecordBatch arrow_record_batch = 4; + } + + // Number of serialized rows in the rows block. + int64 row_count = 6; + + // Statistics for the stream. + StreamStats stats = 2; + + // Throttling state. If unset, the latest response still describes + // the current throttling status. + ThrottleState throttle_state = 5; +} + +// Request message for `SplitReadStream`. +message SplitReadStreamRequest { + // Required. Name of the stream to split. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "bigquerystorage.googleapis.com/ReadStream" + } + ]; + + // A value in the range (0.0, 1.0) that specifies the fractional point at + // which the original stream should be split. The actual split point is + // evaluated on pre-filtered rows, so if a filter is provided, then there is + // no guarantee that the division of the rows between the new child streams + // will be proportional to this fractional value. Additionally, because the + // server-side unit for assigning data is collections of rows, this fraction + // will always map to a data storage boundary on the server side. + double fraction = 2; +} + +// Response message for `SplitReadStream`. +message SplitReadStreamResponse { + // Primary stream, which contains the beginning portion of + // |original_stream|. An empty value indicates that the original stream can no + // longer be split. + ReadStream primary_stream = 1; + + // Remainder stream, which contains the tail of |original_stream|. An empty + // value indicates that the original stream can no longer be split. + ReadStream remainder_stream = 2; +} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/stream.proto b/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/stream.proto new file mode 100644 index 0000000000..e5e643b9e2 --- /dev/null +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/stream.proto @@ -0,0 +1,135 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +syntax = "proto3"; + +package google.cloud.bigquery.storage.v1beta2; + +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/cloud/bigquery/storage/v1beta2/arrow.proto"; +import "google/cloud/bigquery/storage/v1beta2/avro.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta2;storage"; +option java_multiple_files = true; +option java_outer_classname = "StreamProto"; +option java_package = "com.google.cloud.bigquery.storage.v1beta2"; + +// Data format for input or output data. +enum DataFormat { + DATA_FORMAT_UNSPECIFIED = 0; + + // Avro is a standard open source row based file format. + // See https://avro.apache.org/ for more details. + AVRO = 1; + + // Arrow is a standard open source column-based message format. + // See https://arrow.apache.org/ for more details. + ARROW = 2; +} + +// Information about the ReadSession. +message ReadSession { + option (google.api.resource) = { + type: "bigquerystorage.googleapis.com/ReadSession" + pattern: "projects/{project}/locations/{location}/sessions/{session}" + }; + + // Additional attributes when reading a table. + message TableModifiers { + // The snapshot time of the table. If not set, interpreted as now. + google.protobuf.Timestamp snapshot_time = 1; + } + + // Options dictating how we read a table. + message TableReadOptions { + // Names of the fields in the table that should be read. If empty, all + // fields will be read. If the specified field is a nested field, all + // the sub-fields in the field will be selected. The output field order is + // unrelated to the order of fields in selected_fields. + repeated string selected_fields = 1; + + // SQL text filtering statement, similar to a WHERE clause in a query. + // Currently, only a single predicate that is a comparison between a column + // and a constant value is supported. Aggregates are not supported. + // + // Examples: "int_field > 5" + // "date_field = CAST('2014-9-27' as DATE)" + // "nullable_field is not NULL" + // "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))" + // "numeric_field BETWEEN 1.0 AND 5.0" + string row_restriction = 2; + } + + // Output only. Unique identifier for the session, in the form + // `projects/{project_id}/locations/{location}/sessions/{session_id}`. + string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Time at which the session becomes invalid. After this time, + // subsequent requests to read this Session will return errors. The + // expire_time is automatically assigned and currently cannot be specified or + // updated. + google.protobuf.Timestamp expire_time = 2 + [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Immutable. Data format of the output data. Behavior defaults to Apache + // Avro. + DataFormat data_format = 3 [(google.api.field_behavior) = IMMUTABLE]; + + // The schema for the read. If read_options.selected_fields is set, the + // schema may be different from the table schema as it will only contain + // the selected fields. 
+ oneof schema { + // Output only. Avro schema. + AvroSchema avro_schema = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Arrow schema. + ArrowSchema arrow_schema = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + } + + // Table that this ReadSession is reading from, in the form + // `projects/{project_id}/datasets/{dataset_id}/tables/{table_id} + string table = 6; + + // Optional. Any modifiers which are applied when reading from the specified + // table. + TableModifiers table_modifiers = 7 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Read options for this session (e.g. column selection, filters). + TableReadOptions read_options = 8 [(google.api.field_behavior) = OPTIONAL]; + + // Output only. A list of streams created with the session. + // + // At least one stream is created with the session. In the future, larger + // request_stream_count values *may* result in this list being unpopulated, + // in that case, the user will need to use a List method to get the streams + // instead, which is not yet available. + repeated ReadStream streams = 10 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Information about a single stream that gets data out of the storage system. +// Most of the information about `ReadStream` instances is aggregated, making +// `ReadStream` lightweight. +message ReadStream { + option (google.api.resource) = { + type: "bigquerystorage.googleapis.com/ReadStream" + pattern: "projects/{project}/locations/{location}/sessions/{session}/streams/{stream}" + }; + + // Output only. Name of the stream, in the form + // `projects/{project_id}/locations/{location}/sessions/{session_id}/streams/{stream_id}`. + string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/synth.metadata b/synth.metadata index 2957b3fdbb..3c9957eca4 100644 --- a/synth.metadata +++ b/synth.metadata @@ -1,19 +1,32 @@ { - "updateTime": "2020-01-16T08:37:10.013112Z", + "updateTime": "2020-01-24T23:47:31.049509Z", "sources": [ { "generator": { "name": "artman", - "version": "0.44.0", - "dockerImage": "googleapis/artman@sha256:10a6d0342b8d62544810ac5ad86c3b21049ec0696608ac60175da8e513234344" + "version": "0.44.1", + "dockerImage": "googleapis/artman@sha256:5599b61e56a372d21b671969ee915fbca0f6c3a0daaeb898d01f8f685f1bbc8b" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "0735b4b096872960568d1f366bfa75b7b0e1f1a3", - "internalRef": "289939042" + "sha": "3febac09e0bf5f813de64a8d3cb69641af92584f" + } + }, + { + "git": { + "name": "googleapis", + "remote": "https://github.com/googleapis/googleapis.git", + "sha": "3febac09e0bf5f813de64a8d3cb69641af92584f" + } + }, + { + "git": { + "name": "googleapis", + "remote": "https://github.com/googleapis/googleapis.git", + "sha": "3febac09e0bf5f813de64a8d3cb69641af92584f" } }, { @@ -28,353 +41,379 @@ { "client": { "source": "googleapis", - "apiName": "bigquerystorage", + "apiName": "bigquery-storage", "apiVersion": "v1beta1", "language": "java", - "generator": "gapic", - "config": "google/cloud/bigquery/storage/artman_bigquerystorage_v1beta1.yaml" + "generator": "bazel" + } + }, + { + "client": { + "source": "googleapis", + "apiName": "bigquery-storage", + "apiVersion": "v1beta2", + "language": "java", + "generator": "bazel" + } + }, + { + "client": { + "source": "googleapis", + "apiName": "bigquery-storage", + "apiVersion": "v1alpha2", + "language": "java", + "generator": "bazel" } } ], "newFiles": [ { - "path": "pom.xml" + "path": 
".github/ISSUE_TEMPLATE/bug_report.md" }, { - "path": ".repo-metadata.json" + "path": ".github/ISSUE_TEMPLATE/feature_request.md" }, { - "path": "java.header" + "path": ".github/ISSUE_TEMPLATE/support_request.md" }, { - "path": "CONTRIBUTING.md" + "path": ".github/PULL_REQUEST_TEMPLATE.md" }, { - "path": "synth.metadata" + "path": ".github/release-please.yml" }, { - "path": "renovate.json" + "path": ".kokoro/build.bat" }, { - "path": "versions.txt" + "path": ".kokoro/build.sh" }, { - "path": "synth.py" + "path": ".kokoro/coerce_logs.sh" }, { - "path": "license-checks.xml" + "path": ".kokoro/common.cfg" }, { - "path": "CHANGELOG.md" + "path": ".kokoro/continuous/common.cfg" }, { - "path": "CODE_OF_CONDUCT.md" + "path": ".kokoro/continuous/dependencies.cfg" }, { - "path": "LICENSE" + "path": ".kokoro/continuous/integration.cfg" }, { - "path": "codecov.yaml" + "path": ".kokoro/continuous/java11.cfg" }, { - "path": ".gitignore" + "path": ".kokoro/continuous/java7.cfg" }, { - "path": "README.md" + "path": ".kokoro/continuous/java8-osx.cfg" }, { - "path": "google-cloud-bigquerystorage/pom.xml" + "path": ".kokoro/continuous/java8-win.cfg" }, { - "path": "google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorageImpl.java" + "path": ".kokoro/continuous/java8.cfg" }, { - "path": "google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageClientTest.java" + "path": ".kokoro/continuous/lint.cfg" }, { - "path": "google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageClientTest.java" + "path": ".kokoro/continuous/propose_release.cfg" }, { - "path": "google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorage.java" + "path": ".kokoro/continuous/propose_release.sh" }, { - "path": "google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/ITBigQueryStorageLongRunningTest.java" + "path": ".kokoro/continuous/samples.cfg" }, { - "path": "google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/SimpleRowReader.java" + "path": ".kokoro/dependencies.sh" }, { - "path": "google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/it/ITBigQueryStorageTest.java" + "path": ".kokoro/linkage-monitor.sh" }, { - "path": "google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/EnhancedBigQueryStorageStubSettingsTest.java" + "path": ".kokoro/nightly/common.cfg" }, { - "path": "google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/ResourceHeaderTest.java" + "path": ".kokoro/nightly/dependencies.cfg" }, { - "path": "google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/ReadRowsRetryTest.java" + "path": ".kokoro/nightly/integration.cfg" }, { - "path": "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageClient.java" + "path": ".kokoro/nightly/java11.cfg" }, { - "path": "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/package-info.java" + "path": ".kokoro/nightly/java7.cfg" }, { - "path": "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageSettings.java" + "path": ".kokoro/nightly/java8-osx.cfg" }, { - "path": 
"google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageSettings.java" + "path": ".kokoro/nightly/java8-win.cfg" }, { - "path": "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageClient.java" + "path": ".kokoro/nightly/java8.cfg" }, { - "path": "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/GrpcBigQueryStorageCallableFactory.java" + "path": ".kokoro/nightly/lint.cfg" }, { - "path": "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/BigQueryStorageStub.java" + "path": ".kokoro/nightly/samples.cfg" }, { - "path": "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/BigQueryStorageStubSettings.java" + "path": ".kokoro/presubmit/clirr.cfg" }, { - "path": "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/EnhancedBigQueryStorageStubSettings.java" + "path": ".kokoro/presubmit/common.cfg" }, { - "path": "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/EnhancedBigQueryStorageStub.java" + "path": ".kokoro/presubmit/dependencies.cfg" }, { - "path": "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/GrpcBigQueryStorageStub.java" + "path": ".kokoro/presubmit/integration.cfg" }, { - "path": "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/ReadRowsResumptionStrategy.java" + "path": ".kokoro/presubmit/java11.cfg" }, { - "path": "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/readrows/package-info.java" + "path": ".kokoro/presubmit/java7.cfg" }, { - "path": "google-cloud-bigquerystorage-bom/pom.xml" + "path": ".kokoro/presubmit/java8-osx.cfg" }, { - "path": ".github/release-please.yml" + "path": ".kokoro/presubmit/java8-win.cfg" }, { - "path": ".github/PULL_REQUEST_TEMPLATE.md" + "path": ".kokoro/presubmit/java8.cfg" }, { - "path": ".github/ISSUE_TEMPLATE/feature_request.md" + "path": ".kokoro/presubmit/linkage-monitor.cfg" }, { - "path": ".github/ISSUE_TEMPLATE/support_request.md" + "path": ".kokoro/presubmit/lint.cfg" }, { - "path": ".github/ISSUE_TEMPLATE/bug_report.md" + "path": ".kokoro/presubmit/samples.cfg" }, { - "path": "proto-google-cloud-bigquerystorage-v1beta1/pom.xml" + "path": ".kokoro/release/bump_snapshot.cfg" }, { - "path": "proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ArrowProto.java" + "path": ".kokoro/release/bump_snapshot.sh" }, { - "path": "proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/Storage.java" + "path": ".kokoro/release/common.cfg" }, { - "path": "proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ReadOptions.java" + "path": ".kokoro/release/common.sh" }, { - "path": "proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/AvroProto.java" + "path": ".kokoro/release/drop.cfg" }, { - "path": "proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/TableReferenceProto.java" + "path": ".kokoro/release/drop.sh" }, { - "path": "proto-google-cloud-bigquerystorage-v1beta1/src/main/proto/google/cloud/bigquery/storage/v1beta1/storage.proto" + "path": ".kokoro/release/promote.cfg" }, { - "path": 
"proto-google-cloud-bigquerystorage-v1beta1/src/main/proto/google/cloud/bigquery/storage/v1beta1/avro.proto" + "path": ".kokoro/release/promote.sh" }, { - "path": "proto-google-cloud-bigquerystorage-v1beta1/src/main/proto/google/cloud/bigquery/storage/v1beta1/read_options.proto" + "path": ".kokoro/release/publish_javadoc.cfg" }, { - "path": "proto-google-cloud-bigquerystorage-v1beta1/src/main/proto/google/cloud/bigquery/storage/v1beta1/arrow.proto" + "path": ".kokoro/release/publish_javadoc.sh" }, { - "path": "proto-google-cloud-bigquerystorage-v1beta1/src/main/proto/google/cloud/bigquery/storage/v1beta1/table_reference.proto" + "path": ".kokoro/release/snapshot.cfg" }, { - "path": ".kokoro/build.sh" + "path": ".kokoro/release/snapshot.sh" }, { - "path": ".kokoro/common.cfg" + "path": ".kokoro/release/stage.cfg" }, { - "path": ".kokoro/build.bat" + "path": ".kokoro/release/stage.sh" }, { - "path": ".kokoro/linkage-monitor.sh" + "path": ".kokoro/trampoline.sh" }, { - "path": ".kokoro/coerce_logs.sh" + "path": "CODE_OF_CONDUCT.md" }, { - "path": ".kokoro/dependencies.sh" + "path": "CONTRIBUTING.md" }, { - "path": ".kokoro/trampoline.sh" + "path": "LICENSE" }, { - "path": ".kokoro/continuous/samples.cfg" + "path": "codecov.yaml" }, { - "path": ".kokoro/continuous/java8.cfg" + "path": "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClient.java" }, { - "path": ".kokoro/continuous/integration.cfg" + "path": "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/GrpcBigQueryWriteCallableFactory.java" }, { - "path": ".kokoro/continuous/java11.cfg" + "path": "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageClient.java" }, { - "path": ".kokoro/continuous/propose_release.cfg" + "path": "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/stub/GrpcBigQueryStorageCallableFactory.java" }, { - "path": ".kokoro/continuous/common.cfg" + "path": "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BaseBigQueryReadClient.java" }, { - "path": ".kokoro/continuous/propose_release.sh" + "path": "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/stub/GrpcBigQueryReadCallableFactory.java" }, { - "path": ".kokoro/continuous/java7.cfg" + "path": "grpc-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteGrpc.java" }, { - "path": ".kokoro/continuous/lint.cfg" + "path": "grpc-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageGrpc.java" }, { - "path": ".kokoro/continuous/java8-osx.cfg" + "path": "grpc-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadGrpc.java" }, { - "path": ".kokoro/continuous/dependencies.cfg" + "path": "java.header" }, { - "path": ".kokoro/continuous/java8-win.cfg" + "path": "license-checks.xml" }, { - "path": ".kokoro/nightly/samples.cfg" + "path": "proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/ProtoBufProto.java" }, { - "path": ".kokoro/nightly/java8.cfg" + "path": "proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Storage.java" }, { - "path": ".kokoro/nightly/integration.cfg" + "path": 
"proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Stream.java" }, { - "path": ".kokoro/nightly/java11.cfg" + "path": "proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Table.java" }, { - "path": ".kokoro/nightly/common.cfg" + "path": "proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ArrowProto.java" }, { - "path": ".kokoro/nightly/java7.cfg" + "path": "proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/AvroProto.java" }, { - "path": ".kokoro/nightly/lint.cfg" + "path": "proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ReadOptions.java" }, { - "path": ".kokoro/nightly/java8-osx.cfg" + "path": "proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/Storage.java" }, { - "path": ".kokoro/nightly/dependencies.cfg" + "path": "proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/TableReferenceProto.java" }, { - "path": ".kokoro/nightly/java8-win.cfg" + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowProto.java" }, { - "path": ".kokoro/release/publish_javadoc.cfg" + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowRecordBatch.java" }, { - "path": ".kokoro/release/common.sh" + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowRecordBatchOrBuilder.java" }, { - "path": ".kokoro/release/promote.sh" + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSchema.java" }, { - "path": ".kokoro/release/common.cfg" + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ArrowSchemaOrBuilder.java" }, { - "path": ".kokoro/release/publish_javadoc.sh" + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroProto.java" }, { - "path": ".kokoro/release/drop.sh" + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroRows.java" }, { - "path": ".kokoro/release/stage.sh" + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroRowsOrBuilder.java" }, { - "path": ".kokoro/release/promote.cfg" + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroSchema.java" }, { - "path": ".kokoro/release/snapshot.cfg" + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AvroSchemaOrBuilder.java" }, { - "path": ".kokoro/release/bump_snapshot.cfg" + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateReadSessionRequest.java" }, { - "path": ".kokoro/release/drop.cfg" + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CreateReadSessionRequestOrBuilder.java" }, { - "path": ".kokoro/release/bump_snapshot.sh" + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/DataFormat.java" }, { - "path": ".kokoro/release/stage.cfg" + "path": 
"proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsRequest.java" }, { - "path": ".kokoro/release/snapshot.sh" + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsRequestOrBuilder.java" }, { - "path": ".kokoro/presubmit/samples.cfg" + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponse.java" }, { - "path": ".kokoro/presubmit/java8.cfg" + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponseOrBuilder.java" }, { - "path": ".kokoro/presubmit/integration.cfg" + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadSession.java" }, { - "path": ".kokoro/presubmit/java11.cfg" + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadSessionOrBuilder.java" }, { - "path": ".kokoro/presubmit/linkage-monitor.cfg" + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStream.java" }, { - "path": ".kokoro/presubmit/common.cfg" + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadStreamOrBuilder.java" }, { - "path": ".kokoro/presubmit/java7.cfg" + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamRequest.java" }, { - "path": ".kokoro/presubmit/lint.cfg" + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamRequestOrBuilder.java" }, { - "path": ".kokoro/presubmit/java8-osx.cfg" + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamResponse.java" }, { - "path": ".kokoro/presubmit/dependencies.cfg" + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/SplitReadStreamResponseOrBuilder.java" }, { - "path": ".kokoro/presubmit/clirr.cfg" + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageProto.java" }, { - "path": ".kokoro/presubmit/java8-win.cfg" + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamProto.java" }, { - "path": "grpc-google-cloud-bigquerystorage-v1beta1/pom.xml" + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamStats.java" }, { - "path": "grpc-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageGrpc.java" + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StreamStatsOrBuilder.java" + }, + { + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ThrottleState.java" + }, + { + "path": "proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ThrottleStateOrBuilder.java" + }, + { + "path": "renovate.json" } ] } \ No newline at end of file diff --git a/synth.py b/synth.py index 3cddd75c2d..b28fab69de 100644 --- a/synth.py +++ b/synth.py @@ -19,83 +19,18 @@ import synthtool.languages.java as java gapic = gcp.GAPICGenerator() - - -protobuf_header 
= "// Generated by the protocol buffer compiler. DO NOT EDIT!" -# License header -license_header = """/* - * Copyright 2019 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -""" -bad_license_header = """/\\* - \\* Copyright 2018 Google LLC - \\* - \\* Licensed under the Apache License, Version 2.0 \\(the "License"\\); you may not use this file except - \\* in compliance with the License. You may obtain a copy of the License at - \\* - \\* http://www.apache.org/licenses/LICENSE-2.0 - \\* - \\* Unless required by applicable law or agreed to in writing, software distributed under the License - \\* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express - \\* or implied. See the License for the specific language governing permissions and limitations under - \\* the License. - \\*/ -""" - service = 'bigquerystorage' -versions = ['v1beta1'] -config_pattern = '/google/cloud/bigquery/storage/artman_bigquerystorage_{version}.yaml' -package = 'com.google.cloud.bigquery.storage.v1beta1' +versions = ['v1beta1', 'v1beta2', 'v1alpha2'] for version in versions: - library = gapic.java_library( - service=service, + java.bazel_library( + service='bigquery-storage', version=version, - config_path=config_pattern.format(version=version), - artman_output_name='') - - s.replace( - library / f'proto-google-cloud-{service}-{version}/src/**/*.java', - protobuf_header, - f'{license_header}{protobuf_header}' - ) - - s.replace( - library / f'grpc-google-cloud-{service}-{version}/src/**/*.java', - bad_license_header, - license_header + package_pattern='com.google.cloud.bigquery.storage.{version}', + proto_path=f'google/cloud/bigquery/storage/{version}', + bazel_target=f'//google/cloud/bigquery/storage/{version}:google-cloud-bigquery-storage-{version}-java', + destination_name='bigquerystorage', ) - s.replace( - library / f'proto-google-cloud-{service}-{version}/src/**/*.java', - bad_license_header, - license_header - ) - - s.replace( - library / f'grpc-google-cloud-{service}-{version}/src/**/*.java', - f'package {package};', - f'{license_header}package {package};' - ) - - s.copy(library / f'gapic-google-cloud-{service}-{version}/src', f'google-cloud-{service}/src') - s.copy(library / f'grpc-google-cloud-{service}-{version}/src', f'grpc-google-cloud-{service}-{version}/src') - s.copy(library / f'proto-google-cloud-{service}-{version}/src', f'proto-google-cloud-{service}-{version}/src') - - java.format_code(f'google-cloud-{service}/src') - java.format_code(f'grpc-google-cloud-{service}-{version}/src') - java.format_code(f'proto-google-cloud-{service}-{version}/src') common_templates = gcp.CommonTemplates() templates = common_templates.java_library() diff --git a/versions.txt b/versions.txt index dbdc7c5a7f..431e6c82a9 100644 --- a/versions.txt +++ b/versions.txt @@ -2,5 +2,7 @@ # module:released-version:current-version proto-google-cloud-bigquerystorage-v1beta1:0.85.1:0.85.2-SNAPSHOT +proto-google-cloud-bigquerystorage-v1beta2:0.85.1:0.85.2-SNAPSHOT 
grpc-google-cloud-bigquerystorage-v1beta1:0.85.1:0.85.2-SNAPSHOT +grpc-google-cloud-bigquerystorage-v1beta2:0.85.1:0.85.2-SNAPSHOT google-cloud-bigquerystorage:0.120.1-beta:0.120.2-beta-SNAPSHOT
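
Usage note (not part of this patch): the v1beta2 read surface introduced above is typically exercised by creating a read session and then streaming rows from one of its streams, as sketched below. This is a minimal, illustrative sketch only; it assumes the generated GAPIC client shape (BaseBigQueryReadClient.create(), createReadSession(...), readRowsCallable()), application default credentials, and placeholder project/dataset/table names.

import com.google.api.gax.rpc.ServerStream;
import com.google.cloud.bigquery.storage.v1beta2.BaseBigQueryReadClient;
import com.google.cloud.bigquery.storage.v1beta2.CreateReadSessionRequest;
import com.google.cloud.bigquery.storage.v1beta2.DataFormat;
import com.google.cloud.bigquery.storage.v1beta2.ReadRowsRequest;
import com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse;
import com.google.cloud.bigquery.storage.v1beta2.ReadSession;

public class V1beta2ReadSketch {
  public static void main(String[] args) throws Exception {
    // The client uses application default credentials; close it when done.
    try (BaseBigQueryReadClient client = BaseBigQueryReadClient.create()) {
      // Request a single-stream session over a placeholder table, in Avro format.
      CreateReadSessionRequest createRequest =
          CreateReadSessionRequest.newBuilder()
              .setParent("projects/my-project") // placeholder project
              .setReadSession(
                  ReadSession.newBuilder()
                      .setTable("projects/my-project/datasets/my_dataset/tables/my_table")
                      .setDataFormat(DataFormat.AVRO))
              .setMaxStreamCount(1)
              .build();
      ReadSession session = client.createReadSession(createRequest);

      // Streams must be read starting from offset 0; each ReadRowsResponse carries
      // at most ~100 MiB of serialized rows plus StreamStats and, when set,
      // ThrottleState.
      ReadRowsRequest readRequest =
          ReadRowsRequest.newBuilder()
              .setReadStream(session.getStreams(0).getName())
              .setOffset(0)
              .build();
      ServerStream<ReadRowsResponse> rows = client.readRowsCallable().call(readRequest);
      for (ReadRowsResponse response : rows) {
        System.out.println("serialized rows in this response: " + response.getRowCount());
      }
    }
  }
}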