From 7bd13e708490fe893b520ac2144da7e992fedb5e Mon Sep 17 00:00:00 2001
From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com>
Date: Mon, 1 Aug 2022 10:50:43 -0700
Subject: [PATCH] docs: clarify size limitations for AppendRowsRequest (#283)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* docs: clarify size limitations for AppendRowsRequest

chore: add preferred_min_stream_count to CreateReadSessionRequest
chore: add write_stream to AppendRowsResponse

PiperOrigin-RevId: 463602530

Source-Link: https://github.com/googleapis/googleapis/commit/d33b3fa0897cee1cc57b5b428587052c87e9bf25
Source-Link: https://github.com/googleapis/googleapis-gen/commit/90995f6433d0ecd290f186168ce957d6a0db9c68
Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiOTA5OTVmNjQzM2QwZWNkMjkwZjE4NjE2OGNlOTU3ZDZhMGRiOWM2OCJ9

* 🦉 Updates from OwlBot post-processor

See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md

Co-authored-by: Owl Bot
---
 .../cloud/bigquery/storage/v1/storage.proto   | 17 +++++++
 .../protos/protos.d.ts                        | 12 +++++
 .../protos/protos.js                          | 47 ++++++++++++++++++-
 .../protos/protos.json                        |  8 ++++
 .../v1/big_query_read.create_read_session.js  | 10 ++++
 ...data.google.cloud.bigquery.storage.v1.json |  6 ++-
 .../src/v1/big_query_read_client.ts           |  9 ++++
 7 files changed, 107 insertions(+), 2 deletions(-)

diff --git a/packages/google-cloud-bigquery-storage/protos/google/cloud/bigquery/storage/v1/storage.proto b/packages/google-cloud-bigquery-storage/protos/google/cloud/bigquery/storage/v1/storage.proto
index f3c974c6461..e0b25c1afef 100644
--- a/packages/google-cloud-bigquery-storage/protos/google/cloud/bigquery/storage/v1/storage.proto
+++ b/packages/google-cloud-bigquery-storage/protos/google/cloud/bigquery/storage/v1/storage.proto
@@ -256,6 +256,16 @@ message CreateReadSessionRequest {
   // determine an upper bound OR set this a size for the maximum "units of work"
   // it can gracefully handle.
   int32 max_stream_count = 3;
+
+  // The minimum preferred stream count. This parameter can be used to inform
+  // the service that there is a desired lower bound on the number of streams.
+  // This is typically a target parallelism of the client (e.g. a Spark
+  // cluster with N-workers would set this to a low multiple of N to ensure
+  // good cluster utilization).
+  //
+  // The system will make a best effort to provide at least this number of
+  // streams, but in some cases might provide less.
+  int32 preferred_min_stream_count = 4;
 }
 
 // Request message for `ReadRows`.
@@ -395,6 +405,9 @@ message CreateWriteStreamRequest {
 // Due to the nature of AppendRows being a bidirectional streaming RPC, certain
 // parts of the AppendRowsRequest need only be specified for the first request
 // sent each time the gRPC network connection is opened/reopened.
+//
+// The size of a single AppendRowsRequest must be less than 10 MB in size.
+// Requests larger than this return an error, typically `INVALID_ARGUMENT`.
 message AppendRowsRequest {
   // ProtoData contains the data rows and schema when constructing append
   // requests.
@@ -495,6 +508,10 @@ message AppendRowsResponse {
   // appended. The API will return row level error info, so that the caller can
   // remove the bad rows and retry the request.
   repeated RowError row_errors = 4;
+
+  // The target of the append operation. Matches the write_stream in the
+  // corresponding request.
+  string write_stream = 5;
 }
 
 // Request message for `GetWriteStreamRequest`.
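The storage.proto changes above carry the substance of this release: a documented 10 MB ceiling on each AppendRowsRequest, a new preferred_min_stream_count field on CreateReadSessionRequest, and a write_stream field echoed back on AppendRowsResponse. The TypeScript sketch below is illustrative only and is not part of the patch. It batches pre-serialized rows so that each request stays under the documented limit and logs the echoed writeStream from each response; the MAX_REQUEST_BYTES margin and the appendBatched helper are hypothetical names, and the byte accounting is an approximation that ignores per-request framing overhead.

import {v1} from '@google-cloud/bigquery-storage';

// Hypothetical safety margin kept below the documented 10 MB request ceiling.
const MAX_REQUEST_BYTES = 9 * 1024 * 1024;

async function appendBatched(writeStream: string, serializedRows: Buffer[]) {
  const client = new v1.BigQueryWriteClient();
  const stream = client.appendRows();

  stream.on('data', (response: {writeStream?: string | null}) => {
    // New in this change: the response echoes the stream targeted by the append.
    console.log(`append acknowledged on ${response.writeStream}`);
  });
  stream.on('error', console.error);

  // NOTE: a real writer also sends protoRows.writerSchema on the first request
  // of each connection, per the AppendRowsRequest comment above; omitted here.
  let batch: Buffer[] = [];
  let batchBytes = 0;
  const flush = () => {
    if (batch.length === 0) return;
    stream.write({writeStream, protoRows: {rows: {serializedRows: batch}}});
    batch = [];
    batchBytes = 0;
  };
  for (const row of serializedRows) {
    // Start a new request before the current one would exceed the budget.
    if (batchBytes + row.length > MAX_REQUEST_BYTES) flush();
    batch.push(row);
    batchBytes += row.length;
  }
  flush();
  // In a real writer you would wait for acknowledgements before ending the stream.
  stream.end();
}

As the comment block in the proto notes, only parts of AppendRowsRequest need to be repeated after the first request of a connection, so a writer that batches like this pays the schema cost once per connection rather than once per request.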
diff --git a/packages/google-cloud-bigquery-storage/protos/protos.d.ts b/packages/google-cloud-bigquery-storage/protos/protos.d.ts
index b111ca1f564..197d0abe8c4 100644
--- a/packages/google-cloud-bigquery-storage/protos/protos.d.ts
+++ b/packages/google-cloud-bigquery-storage/protos/protos.d.ts
@@ -929,6 +929,9 @@ export namespace google {
 
                 /** CreateReadSessionRequest maxStreamCount */
                 maxStreamCount?: (number|null);
+
+                /** CreateReadSessionRequest preferredMinStreamCount */
+                preferredMinStreamCount?: (number|null);
             }
 
             /** Represents a CreateReadSessionRequest. */
@@ -949,6 +952,9 @@ export namespace google {
                 /** CreateReadSessionRequest maxStreamCount. */
                 public maxStreamCount: number;
 
+                /** CreateReadSessionRequest preferredMinStreamCount. */
+                public preferredMinStreamCount: number;
+
                 /**
                  * Creates a new CreateReadSessionRequest instance using the specified properties.
                  * @param [properties] Properties to set
@@ -2039,6 +2045,9 @@ export namespace google {
 
                 /** AppendRowsResponse rowErrors */
                 rowErrors?: (google.cloud.bigquery.storage.v1.IRowError[]|null);
+
+                /** AppendRowsResponse writeStream */
+                writeStream?: (string|null);
             }
 
             /** Represents an AppendRowsResponse. */
@@ -2062,6 +2071,9 @@ export namespace google {
                 /** AppendRowsResponse rowErrors. */
                 public rowErrors: google.cloud.bigquery.storage.v1.IRowError[];
 
+                /** AppendRowsResponse writeStream. */
+                public writeStream: string;
+
                 /** AppendRowsResponse response. */
                 public response?: ("appendResult"|"error");
 
diff --git a/packages/google-cloud-bigquery-storage/protos/protos.js b/packages/google-cloud-bigquery-storage/protos/protos.js
index ea81e6448f4..32551b9ff33 100644
--- a/packages/google-cloud-bigquery-storage/protos/protos.js
+++ b/packages/google-cloud-bigquery-storage/protos/protos.js
@@ -1919,6 +1919,7 @@
              * @property {string|null} [parent] CreateReadSessionRequest parent
              * @property {google.cloud.bigquery.storage.v1.IReadSession|null} [readSession] CreateReadSessionRequest readSession
              * @property {number|null} [maxStreamCount] CreateReadSessionRequest maxStreamCount
+             * @property {number|null} [preferredMinStreamCount] CreateReadSessionRequest preferredMinStreamCount
              */
 
             /**
@@ -1960,6 +1961,14 @@
              */
             CreateReadSessionRequest.prototype.maxStreamCount = 0;
 
+            /**
+             * CreateReadSessionRequest preferredMinStreamCount.
+             * @member {number} preferredMinStreamCount
+             * @memberof google.cloud.bigquery.storage.v1.CreateReadSessionRequest
+             * @instance
+             */
+            CreateReadSessionRequest.prototype.preferredMinStreamCount = 0;
+
             /**
              * Creates a new CreateReadSessionRequest instance using the specified properties.
             * @function create
@@ -1990,6 +1999,8 @@
                 $root.google.cloud.bigquery.storage.v1.ReadSession.encode(message.readSession, writer.uint32(/* id 2, wireType 2 =*/18).fork()).ldelim();
             if (message.maxStreamCount != null && Object.hasOwnProperty.call(message, "maxStreamCount"))
                 writer.uint32(/* id 3, wireType 0 =*/24).int32(message.maxStreamCount);
+            if (message.preferredMinStreamCount != null && Object.hasOwnProperty.call(message, "preferredMinStreamCount"))
+                writer.uint32(/* id 4, wireType 0 =*/32).int32(message.preferredMinStreamCount);
             return writer;
         };
 
@@ -2033,6 +2044,9 @@
                 case 3:
                     message.maxStreamCount = reader.int32();
                     break;
+                case 4:
+                    message.preferredMinStreamCount = reader.int32();
+                    break;
                 default:
                     reader.skipType(tag & 7);
                     break;
@@ -2079,6 +2093,9 @@
             if (message.maxStreamCount != null && message.hasOwnProperty("maxStreamCount"))
                 if (!$util.isInteger(message.maxStreamCount))
                     return "maxStreamCount: integer expected";
+            if (message.preferredMinStreamCount != null && message.hasOwnProperty("preferredMinStreamCount"))
+                if (!$util.isInteger(message.preferredMinStreamCount))
+                    return "preferredMinStreamCount: integer expected";
             return null;
         };
 
@@ -2103,6 +2120,8 @@
             }
             if (object.maxStreamCount != null)
                 message.maxStreamCount = object.maxStreamCount | 0;
+            if (object.preferredMinStreamCount != null)
+                message.preferredMinStreamCount = object.preferredMinStreamCount | 0;
             return message;
         };
 
@@ -2123,6 +2142,7 @@
                 object.parent = "";
                 object.readSession = null;
                 object.maxStreamCount = 0;
+                object.preferredMinStreamCount = 0;
             }
             if (message.parent != null && message.hasOwnProperty("parent"))
                 object.parent = message.parent;
@@ -2130,6 +2150,8 @@
                 object.readSession = $root.google.cloud.bigquery.storage.v1.ReadSession.toObject(message.readSession, options);
             if (message.maxStreamCount != null && message.hasOwnProperty("maxStreamCount"))
                 object.maxStreamCount = message.maxStreamCount;
+            if (message.preferredMinStreamCount != null && message.hasOwnProperty("preferredMinStreamCount"))
+                object.preferredMinStreamCount = message.preferredMinStreamCount;
             return object;
         };
 
@@ -4533,6 +4555,7 @@
              * @property {google.rpc.IStatus|null} [error] AppendRowsResponse error
              * @property {google.cloud.bigquery.storage.v1.ITableSchema|null} [updatedSchema] AppendRowsResponse updatedSchema
              * @property {Array.<google.cloud.bigquery.storage.v1.IRowError>|null} [rowErrors] AppendRowsResponse rowErrors
+             * @property {string|null} [writeStream] AppendRowsResponse writeStream
              */
 
             /**
@@ -4583,6 +4606,14 @@
              */
             AppendRowsResponse.prototype.rowErrors = $util.emptyArray;
 
+            /**
+             * AppendRowsResponse writeStream.
+             * @member {string} writeStream
+             * @memberof google.cloud.bigquery.storage.v1.AppendRowsResponse
+             * @instance
+             */
+            AppendRowsResponse.prototype.writeStream = "";
+
             // OneOf field names bound to virtual getters and setters
             var $oneOfFields;
 
@@ -4630,6 +4661,8 @@
             if (message.rowErrors != null && message.rowErrors.length)
                 for (var i = 0; i < message.rowErrors.length; ++i)
                     $root.google.cloud.bigquery.storage.v1.RowError.encode(message.rowErrors[i], writer.uint32(/* id 4, wireType 2 =*/34).fork()).ldelim();
+            if (message.writeStream != null && Object.hasOwnProperty.call(message, "writeStream"))
+                writer.uint32(/* id 5, wireType 2 =*/42).string(message.writeStream);
             return writer;
         };
 
@@ -4678,6 +4711,9 @@
                         message.rowErrors = [];
                     message.rowErrors.push($root.google.cloud.bigquery.storage.v1.RowError.decode(reader, reader.uint32()));
                     break;
+                case 5:
+                    message.writeStream = reader.string();
+                    break;
                 default:
                     reader.skipType(tag & 7);
                     break;
@@ -4746,6 +4782,9 @@
                         return "rowErrors." + error;
                 }
             }
+            if (message.writeStream != null && message.hasOwnProperty("writeStream"))
+                if (!$util.isString(message.writeStream))
+                    return "writeStream: string expected";
             return null;
         };
 
@@ -4786,6 +4825,8 @@
                     message.rowErrors[i] = $root.google.cloud.bigquery.storage.v1.RowError.fromObject(object.rowErrors[i]);
                 }
             }
+            if (object.writeStream != null)
+                message.writeStream = String(object.writeStream);
             return message;
         };
 
@@ -4804,8 +4845,10 @@
             var object = {};
             if (options.arrays || options.defaults)
                 object.rowErrors = [];
-            if (options.defaults)
+            if (options.defaults) {
                 object.updatedSchema = null;
+                object.writeStream = "";
+            }
             if (message.appendResult != null && message.hasOwnProperty("appendResult")) {
                 object.appendResult = $root.google.cloud.bigquery.storage.v1.AppendRowsResponse.AppendResult.toObject(message.appendResult, options);
                 if (options.oneofs)
@@ -4823,6 +4866,8 @@
                 for (var j = 0; j < message.rowErrors.length; ++j)
                     object.rowErrors[j] = $root.google.cloud.bigquery.storage.v1.RowError.toObject(message.rowErrors[j], options);
             }
+            if (message.writeStream != null && message.hasOwnProperty("writeStream"))
+                object.writeStream = message.writeStream;
             return object;
         };
 
diff --git a/packages/google-cloud-bigquery-storage/protos/protos.json b/packages/google-cloud-bigquery-storage/protos/protos.json
index d796f2c8745..25abbfdca79 100644
--- a/packages/google-cloud-bigquery-storage/protos/protos.json
+++ b/packages/google-cloud-bigquery-storage/protos/protos.json
@@ -309,6 +309,10 @@
         "maxStreamCount": {
           "type": "int32",
           "id": 3
+        },
+        "preferredMinStreamCount": {
+          "type": "int32",
+          "id": 4
         }
       }
     },
@@ -528,6 +532,10 @@
        "rowErrors": {
          "rule": "repeated",
          "type": "RowError",
          "id": 4
+        },
+        "writeStream": {
+          "type": "string",
+          "id": 5
         }
       },
       "nested": {
diff --git a/packages/google-cloud-bigquery-storage/samples/generated/v1/big_query_read.create_read_session.js b/packages/google-cloud-bigquery-storage/samples/generated/v1/big_query_read.create_read_session.js
index c5cc160a217..1f239c8f2e1 100644
--- a/packages/google-cloud-bigquery-storage/samples/generated/v1/big_query_read.create_read_session.js
+++ b/packages/google-cloud-bigquery-storage/samples/generated/v1/big_query_read.create_read_session.js
@@ -46,6 +46,16 @@ function main(parent, readSession) {
    * it can gracefully handle.
    */
   // const maxStreamCount = 1234
+  /**
+   * The minimum preferred stream count. This parameter can be used to inform
+   * the service that there is a desired lower bound on the number of streams.
+   * This is typically a target parallelism of the client (e.g. a Spark
+   * cluster with N-workers would set this to a low multiple of N to ensure
+   * good cluster utilization).
+   * The system will make a best effort to provide at least this number of
+   * streams, but in some cases might provide less.
+   */
+  // const preferredMinStreamCount = 1234
 
   // Imports the Storage library
   const {BigQueryReadClient} = require('@google-cloud/bigquery-storage').v1;
diff --git a/packages/google-cloud-bigquery-storage/samples/generated/v1/snippet_metadata.google.cloud.bigquery.storage.v1.json b/packages/google-cloud-bigquery-storage/samples/generated/v1/snippet_metadata.google.cloud.bigquery.storage.v1.json
index 28d36811dc6..ef8132bd613 100644
--- a/packages/google-cloud-bigquery-storage/samples/generated/v1/snippet_metadata.google.cloud.bigquery.storage.v1.json
+++ b/packages/google-cloud-bigquery-storage/samples/generated/v1/snippet_metadata.google.cloud.bigquery.storage.v1.json
@@ -22,7 +22,7 @@
             "segments": [
                 {
                     "start": 25,
-                    "end": 68,
+                    "end": 78,
                     "type": "FULL"
                 }
             ],
@@ -42,6 +42,10 @@
                 {
                     "name": "max_stream_count",
                     "type": "TYPE_INT32"
+                },
+                {
+                    "name": "preferred_min_stream_count",
+                    "type": "TYPE_INT32"
                 }
             ],
             "resultType": ".google.cloud.bigquery.storage.v1.ReadSession",
diff --git a/packages/google-cloud-bigquery-storage/src/v1/big_query_read_client.ts b/packages/google-cloud-bigquery-storage/src/v1/big_query_read_client.ts
index 553fe889849..671bb00f297 100644
--- a/packages/google-cloud-bigquery-storage/src/v1/big_query_read_client.ts
+++ b/packages/google-cloud-bigquery-storage/src/v1/big_query_read_client.ts
@@ -376,6 +376,15 @@ export class BigQueryReadClient {
    *   Typically, clients should either leave this unset to let the system to
    *   determine an upper bound OR set this a size for the maximum "units of work"
    *   it can gracefully handle.
+   * @param {number} request.preferredMinStreamCount
+   *   The minimum preferred stream count. This parameter can be used to inform
+   *   the service that there is a desired lower bound on the number of streams.
+   *   This is typically a target parallelism of the client (e.g. a Spark
+   *   cluster with N-workers would set this to a low multiple of N to ensure
+   *   good cluster utilization).
+   *
+   *   The system will make a best effort to provide at least this number of
+   *   streams, but in some cases might provide less.
    * @param {object} [options]
    *   Call options. See {@link https://googleapis.dev/nodejs/google-gax/latest/interfaces/CallOptions.html|CallOptions} for more details.
    * @returns {Promise} - The promise which resolves to an array.
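On the read side, the new field surfaces as preferredMinStreamCount on createReadSession, next to the existing maxStreamCount upper bound documented above. The TypeScript sketch below is not part of the patch; the numWorkers argument and the dataset/table names are placeholders, and the 4x multiplier is only an illustration of choosing "a low multiple of N".

import {v1} from '@google-cloud/bigquery-storage';

async function createSession(numWorkers: number) {
  const client = new v1.BigQueryReadClient();
  const project = await client.getProjectId();

  const [session] = await client.createReadSession({
    parent: `projects/${project}`,
    readSession: {
      // Placeholder table path: projects/{project}/datasets/{dataset}/tables/{table}
      table: `projects/${project}/datasets/my_dataset/tables/my_table`,
      dataFormat: 'AVRO',
    },
    // Existing upper bound on the number of streams the session may contain.
    maxStreamCount: numWorkers * 4,
    // New in this change: a best-effort lower bound, typically tied to the
    // client's target parallelism. The service may still return fewer streams.
    preferredMinStreamCount: numWorkers,
  });

  console.log(
    `session ${session.name} created with ${session.streams?.length ?? 0} streams`
  );
  return session;
}

Because the field is best-effort, callers should still size their work distribution from the number of streams actually returned on the session rather than assume the requested minimum was honored.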