From f5d8155d7070f4d1b60ba32bc2f168c4169d9546 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 12 Nov 2021 10:26:32 -0800 Subject: [PATCH] feat: allow setting custom CA for generic webhooks (#203) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: allow setting custom CA for generic webhooks PiperOrigin-RevId: 408995680 Source-Link: https://github.com/googleapis/googleapis/commit/76f7f485d67ea4953c5fe34e2870cc46c8803824 Source-Link: https://github.com/googleapis/googleapis-gen/commit/9e5faa62a238bd42c1350d3e51a6540af7337587 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiOWU1ZmFhNjJhMjM4YmQ0MmMxMzUwZDNlNTFhNjU0MGFmNzMzNzU4NyJ9 * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md Co-authored-by: Owl Bot --- .../dialogflow/cx/v3beta1/audio_config.proto | 50 +++++----- .../cloud/dialogflow/cx/v3beta1/webhook.proto | 13 +++ protos/protos.d.ts | 30 +++--- protos/protos.js | 94 ++++++++++++++----- protos/protos.json | 32 ++++--- 5 files changed, 144 insertions(+), 75 deletions(-) diff --git a/protos/google/cloud/dialogflow/cx/v3beta1/audio_config.proto b/protos/google/cloud/dialogflow/cx/v3beta1/audio_config.proto index 8c8759c0..3814693a 100644 --- a/protos/google/cloud/dialogflow/cx/v3beta1/audio_config.proto +++ b/protos/google/cloud/dialogflow/cx/v3beta1/audio_config.proto @@ -31,6 +31,31 @@ option java_package = "com.google.cloud.dialogflow.cx.v3beta1"; option objc_class_prefix = "DF"; option ruby_package = "Google::Cloud::Dialogflow::CX::V3beta1"; +// Information for a word recognized by the speech recognizer. +message SpeechWordInfo { + // The word this info is for. + string word = 3; + + // Time offset relative to the beginning of the audio that corresponds to the + // start of the spoken word. This is an experimental feature and the accuracy + // of the time offset can vary. + google.protobuf.Duration start_offset = 1; + + // Time offset relative to the beginning of the audio that corresponds to the + // end of the spoken word. This is an experimental feature and the accuracy of + // the time offset can vary. + google.protobuf.Duration end_offset = 2; + + // The Speech confidence between 0.0 and 1.0 for this word. A higher number + // indicates an estimated greater likelihood that the recognized word is + // correct. The default of 0.0 is a sentinel value indicating that confidence + // was not set. + // + // This field is not guaranteed to be fully stable over time for the same + // audio input. Users should also not rely on it to always be provided. + float confidence = 4; +} + // Audio encoding of the audio content sent in the conversational query request. // Refer to the // [Cloud Speech API @@ -80,31 +105,6 @@ enum AudioEncoding { AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE = 7; } -// Information for a word recognized by the speech recognizer. -message SpeechWordInfo { - // The word this info is for. - string word = 3; - - // Time offset relative to the beginning of the audio that corresponds to the - // start of the spoken word. This is an experimental feature and the accuracy - // of the time offset can vary. - google.protobuf.Duration start_offset = 1; - - // Time offset relative to the beginning of the audio that corresponds to the - // end of the spoken word. 
This is an experimental feature and the accuracy of - // the time offset can vary. - google.protobuf.Duration end_offset = 2; - - // The Speech confidence between 0.0 and 1.0 for this word. A higher number - // indicates an estimated greater likelihood that the recognized word is - // correct. The default of 0.0 is a sentinel value indicating that confidence - // was not set. - // - // This field is not guaranteed to be fully stable over time for the same - // audio input. Users should also not rely on it to always be provided. - float confidence = 4; -} - // Instructs the speech recognizer on how to process the audio content. message InputAudioConfig { // Required. Audio encoding of the audio content to process. diff --git a/protos/google/cloud/dialogflow/cx/v3beta1/webhook.proto b/protos/google/cloud/dialogflow/cx/v3beta1/webhook.proto index b5b0eaf6..a9c9f9b9 100644 --- a/protos/google/cloud/dialogflow/cx/v3beta1/webhook.proto +++ b/protos/google/cloud/dialogflow/cx/v3beta1/webhook.proto @@ -113,6 +113,19 @@ message Webhook { // The HTTP request headers to send together with webhook // requests. map request_headers = 4; + + // Optional. Specifies a list of allowed custom CA certificates (in DER format) for + // HTTPS verification. This overrides the default SSL trust store. If this + // is empty or unspecified, Dialogflow will use Google's default trust store + // to verify certificates. + // N.B. Make sure the HTTPS server certificates are signed with "subject alt + // name". For instance a certificate can be self-signed using the following + // command, + // openssl x509 -req -days 200 -in example.com.csr \ + // -signkey example.com.key \ + // -out example.com.crt \ + // -extfile <(printf "\nsubjectAltName='DNS:www.example.com'") + repeated bytes allowed_ca_certs = 5 [(google.api.field_behavior) = OPTIONAL]; } // Represents configuration for a [Service diff --git a/protos/protos.d.ts b/protos/protos.d.ts index a15f9e4f..5480656b 100644 --- a/protos/protos.d.ts +++ b/protos/protos.d.ts @@ -35306,18 +35306,6 @@ export namespace google { } } - /** AudioEncoding enum. */ - enum AudioEncoding { - AUDIO_ENCODING_UNSPECIFIED = 0, - AUDIO_ENCODING_LINEAR_16 = 1, - AUDIO_ENCODING_FLAC = 2, - AUDIO_ENCODING_MULAW = 3, - AUDIO_ENCODING_AMR = 4, - AUDIO_ENCODING_AMR_WB = 5, - AUDIO_ENCODING_OGG_OPUS = 6, - AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE = 7 - } - /** Properties of a SpeechWordInfo. */ interface ISpeechWordInfo { @@ -35426,6 +35414,18 @@ export namespace google { public toJSON(): { [k: string]: any }; } + /** AudioEncoding enum. */ + enum AudioEncoding { + AUDIO_ENCODING_UNSPECIFIED = 0, + AUDIO_ENCODING_LINEAR_16 = 1, + AUDIO_ENCODING_FLAC = 2, + AUDIO_ENCODING_MULAW = 3, + AUDIO_ENCODING_AMR = 4, + AUDIO_ENCODING_AMR_WB = 5, + AUDIO_ENCODING_OGG_OPUS = 6, + AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE = 7 + } + /** Properties of an InputAudioConfig. */ interface IInputAudioConfig { @@ -53393,6 +53393,9 @@ export namespace google { /** GenericWebService requestHeaders */ requestHeaders?: ({ [k: string]: string }|null); + + /** GenericWebService allowedCaCerts */ + allowedCaCerts?: (Uint8Array[]|null); } /** Represents a GenericWebService. */ @@ -53416,6 +53419,9 @@ export namespace google { /** GenericWebService requestHeaders. */ public requestHeaders: { [k: string]: string }; + /** GenericWebService allowedCaCerts. */ + public allowedCaCerts: Uint8Array[]; + /** * Creates a new GenericWebService instance using the specified properties. 
* @param [properties] Properties to set diff --git a/protos/protos.js b/protos/protos.js index 54c49436..ad533d03 100644 --- a/protos/protos.js +++ b/protos/protos.js @@ -82742,32 +82742,6 @@ return SecuritySettings; })(); - /** - * AudioEncoding enum. - * @name google.cloud.dialogflow.cx.v3beta1.AudioEncoding - * @enum {number} - * @property {number} AUDIO_ENCODING_UNSPECIFIED=0 AUDIO_ENCODING_UNSPECIFIED value - * @property {number} AUDIO_ENCODING_LINEAR_16=1 AUDIO_ENCODING_LINEAR_16 value - * @property {number} AUDIO_ENCODING_FLAC=2 AUDIO_ENCODING_FLAC value - * @property {number} AUDIO_ENCODING_MULAW=3 AUDIO_ENCODING_MULAW value - * @property {number} AUDIO_ENCODING_AMR=4 AUDIO_ENCODING_AMR value - * @property {number} AUDIO_ENCODING_AMR_WB=5 AUDIO_ENCODING_AMR_WB value - * @property {number} AUDIO_ENCODING_OGG_OPUS=6 AUDIO_ENCODING_OGG_OPUS value - * @property {number} AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE=7 AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE value - */ - v3beta1.AudioEncoding = (function() { - var valuesById = {}, values = Object.create(valuesById); - values[valuesById[0] = "AUDIO_ENCODING_UNSPECIFIED"] = 0; - values[valuesById[1] = "AUDIO_ENCODING_LINEAR_16"] = 1; - values[valuesById[2] = "AUDIO_ENCODING_FLAC"] = 2; - values[valuesById[3] = "AUDIO_ENCODING_MULAW"] = 3; - values[valuesById[4] = "AUDIO_ENCODING_AMR"] = 4; - values[valuesById[5] = "AUDIO_ENCODING_AMR_WB"] = 5; - values[valuesById[6] = "AUDIO_ENCODING_OGG_OPUS"] = 6; - values[valuesById[7] = "AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE"] = 7; - return values; - })(); - v3beta1.SpeechWordInfo = (function() { /** @@ -83032,6 +83006,32 @@ return SpeechWordInfo; })(); + /** + * AudioEncoding enum. + * @name google.cloud.dialogflow.cx.v3beta1.AudioEncoding + * @enum {number} + * @property {number} AUDIO_ENCODING_UNSPECIFIED=0 AUDIO_ENCODING_UNSPECIFIED value + * @property {number} AUDIO_ENCODING_LINEAR_16=1 AUDIO_ENCODING_LINEAR_16 value + * @property {number} AUDIO_ENCODING_FLAC=2 AUDIO_ENCODING_FLAC value + * @property {number} AUDIO_ENCODING_MULAW=3 AUDIO_ENCODING_MULAW value + * @property {number} AUDIO_ENCODING_AMR=4 AUDIO_ENCODING_AMR value + * @property {number} AUDIO_ENCODING_AMR_WB=5 AUDIO_ENCODING_AMR_WB value + * @property {number} AUDIO_ENCODING_OGG_OPUS=6 AUDIO_ENCODING_OGG_OPUS value + * @property {number} AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE=7 AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE value + */ + v3beta1.AudioEncoding = (function() { + var valuesById = {}, values = Object.create(valuesById); + values[valuesById[0] = "AUDIO_ENCODING_UNSPECIFIED"] = 0; + values[valuesById[1] = "AUDIO_ENCODING_LINEAR_16"] = 1; + values[valuesById[2] = "AUDIO_ENCODING_FLAC"] = 2; + values[valuesById[3] = "AUDIO_ENCODING_MULAW"] = 3; + values[valuesById[4] = "AUDIO_ENCODING_AMR"] = 4; + values[valuesById[5] = "AUDIO_ENCODING_AMR_WB"] = 5; + values[valuesById[6] = "AUDIO_ENCODING_OGG_OPUS"] = 6; + values[valuesById[7] = "AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE"] = 7; + return values; + })(); + v3beta1.InputAudioConfig = (function() { /** @@ -125039,6 +125039,7 @@ * @property {string|null} [username] GenericWebService username * @property {string|null} [password] GenericWebService password * @property {Object.|null} [requestHeaders] GenericWebService requestHeaders + * @property {Array.|null} [allowedCaCerts] GenericWebService allowedCaCerts */ /** @@ -125051,6 +125052,7 @@ */ function GenericWebService(properties) { this.requestHeaders = {}; + this.allowedCaCerts = []; if (properties) for (var keys = Object.keys(properties), i = 0; i < 
keys.length; ++i) if (properties[keys[i]] != null) @@ -125089,6 +125091,14 @@ */ GenericWebService.prototype.requestHeaders = $util.emptyObject; + /** + * GenericWebService allowedCaCerts. + * @member {Array.} allowedCaCerts + * @memberof google.cloud.dialogflow.cx.v3beta1.Webhook.GenericWebService + * @instance + */ + GenericWebService.prototype.allowedCaCerts = $util.emptyArray; + /** * Creates a new GenericWebService instance using the specified properties. * @function create @@ -125122,6 +125132,9 @@ if (message.requestHeaders != null && Object.hasOwnProperty.call(message, "requestHeaders")) for (var keys = Object.keys(message.requestHeaders), i = 0; i < keys.length; ++i) writer.uint32(/* id 4, wireType 2 =*/34).fork().uint32(/* id 1, wireType 2 =*/10).string(keys[i]).uint32(/* id 2, wireType 2 =*/18).string(message.requestHeaders[keys[i]]).ldelim(); + if (message.allowedCaCerts != null && message.allowedCaCerts.length) + for (var i = 0; i < message.allowedCaCerts.length; ++i) + writer.uint32(/* id 5, wireType 2 =*/42).bytes(message.allowedCaCerts[i]); return writer; }; @@ -125187,6 +125200,11 @@ } message.requestHeaders[key] = value; break; + case 5: + if (!(message.allowedCaCerts && message.allowedCaCerts.length)) + message.allowedCaCerts = []; + message.allowedCaCerts.push(reader.bytes()); + break; default: reader.skipType(tag & 7); break; @@ -125239,6 +125257,13 @@ if (!$util.isString(message.requestHeaders[key[i]])) return "requestHeaders: string{k:string} expected"; } + if (message.allowedCaCerts != null && message.hasOwnProperty("allowedCaCerts")) { + if (!Array.isArray(message.allowedCaCerts)) + return "allowedCaCerts: array expected"; + for (var i = 0; i < message.allowedCaCerts.length; ++i) + if (!(message.allowedCaCerts[i] && typeof message.allowedCaCerts[i].length === "number" || $util.isString(message.allowedCaCerts[i]))) + return "allowedCaCerts: buffer[] expected"; + } return null; }; @@ -125267,6 +125292,16 @@ for (var keys = Object.keys(object.requestHeaders), i = 0; i < keys.length; ++i) message.requestHeaders[keys[i]] = String(object.requestHeaders[keys[i]]); } + if (object.allowedCaCerts) { + if (!Array.isArray(object.allowedCaCerts)) + throw TypeError(".google.cloud.dialogflow.cx.v3beta1.Webhook.GenericWebService.allowedCaCerts: array expected"); + message.allowedCaCerts = []; + for (var i = 0; i < object.allowedCaCerts.length; ++i) + if (typeof object.allowedCaCerts[i] === "string") + $util.base64.decode(object.allowedCaCerts[i], message.allowedCaCerts[i] = $util.newBuffer($util.base64.length(object.allowedCaCerts[i])), 0); + else if (object.allowedCaCerts[i].length) + message.allowedCaCerts[i] = object.allowedCaCerts[i]; + } return message; }; @@ -125283,6 +125318,8 @@ if (!options) options = {}; var object = {}; + if (options.arrays || options.defaults) + object.allowedCaCerts = []; if (options.objects || options.defaults) object.requestHeaders = {}; if (options.defaults) { @@ -125302,6 +125339,11 @@ for (var j = 0; j < keys2.length; ++j) object.requestHeaders[keys2[j]] = message.requestHeaders[keys2[j]]; } + if (message.allowedCaCerts && message.allowedCaCerts.length) { + object.allowedCaCerts = []; + for (var j = 0; j < message.allowedCaCerts.length; ++j) + object.allowedCaCerts[j] = options.bytes === String ? $util.base64.encode(message.allowedCaCerts[j], 0, message.allowedCaCerts[j].length) : options.bytes === Array ? 
Array.prototype.slice.call(message.allowedCaCerts[j]) : message.allowedCaCerts[j]; + } return object; }; diff --git a/protos/protos.json b/protos/protos.json index 387ebee3..0870a37b 100644 --- a/protos/protos.json +++ b/protos/protos.json @@ -9534,18 +9534,6 @@ } } }, - "AudioEncoding": { - "values": { - "AUDIO_ENCODING_UNSPECIFIED": 0, - "AUDIO_ENCODING_LINEAR_16": 1, - "AUDIO_ENCODING_FLAC": 2, - "AUDIO_ENCODING_MULAW": 3, - "AUDIO_ENCODING_AMR": 4, - "AUDIO_ENCODING_AMR_WB": 5, - "AUDIO_ENCODING_OGG_OPUS": 6, - "AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE": 7 - } - }, "SpeechWordInfo": { "fields": { "word": { @@ -9566,6 +9554,18 @@ } } }, + "AudioEncoding": { + "values": { + "AUDIO_ENCODING_UNSPECIFIED": 0, + "AUDIO_ENCODING_LINEAR_16": 1, + "AUDIO_ENCODING_FLAC": 2, + "AUDIO_ENCODING_MULAW": 3, + "AUDIO_ENCODING_AMR": 4, + "AUDIO_ENCODING_AMR_WB": 5, + "AUDIO_ENCODING_OGG_OPUS": 6, + "AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE": 7 + } + }, "InputAudioConfig": { "fields": { "audioEncoding": { @@ -14434,6 +14434,14 @@ "keyType": "string", "type": "string", "id": 4 + }, + "allowedCaCerts": { + "rule": "repeated", + "type": "bytes", + "id": 5, + "options": { + "(google.api.field_behavior)": "OPTIONAL" + } } } },
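
The new `allowed_ca_certs` field (repeated `bytes`, id 5) lets a `GenericWebService` carry its own trust anchors in DER form. Below is a minimal sketch, not part of this patch, of how the field might be set through the generated client. It assumes the `@google-cloud/dialogflow-cx` package exposes a `v3beta1.WebhooksClient`; the agent path, certificate file name, and the `pemToDer` helper are hypothetical and shown only for illustration.

```ts
import {v3beta1} from '@google-cloud/dialogflow-cx';
import {readFileSync} from 'fs';

// allowed_ca_certs expects DER-encoded certificates. If only a PEM file is
// available, strip the armor lines and base64-decode the body (assumption:
// a single-certificate PEM such as the example.com.crt from the proto comment).
function pemToDer(pemPath: string): Buffer {
  const pem = readFileSync(pemPath, 'utf8');
  const body = pem
    .replace(/-----BEGIN CERTIFICATE-----/g, '')
    .replace(/-----END CERTIFICATE-----/g, '')
    .replace(/\s+/g, '');
  return Buffer.from(body, 'base64');
}

async function createWebhookWithCustomCa(): Promise<void> {
  const client = new v3beta1.WebhooksClient();
  await client.createWebhook({
    // Hypothetical agent path.
    parent: 'projects/my-project/locations/global/agents/my-agent',
    webhook: {
      displayName: 'webhook-with-custom-ca',
      genericWebService: {
        uri: 'https://www.example.com/webhook',
        // New repeated bytes field added by this change; Buffer is a Uint8Array.
        allowedCaCerts: [pemToDer('./example.com.crt')],
      },
    },
  });
}

createWebhookWithCustomCa().catch(console.error);
```

As the proto comment notes, the server certificate must carry a subject alternative name matching the webhook host, since supplying a custom CA replaces Google's default trust store for that webhook only.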
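
The generated `fromObject`/`toObject` changes in protos.js accept either raw buffers or base64 strings for the new field and can re-encode buffers as base64 on output. A small round-trip sketch of that behavior, assuming it is run from the package root so `./protos/protos.js` resolves and exports the `google` namespace object:

```ts
// Round-trip a GenericWebService through the generated static code to show
// the base64 handling added by this patch for allowedCaCerts.
const {google} = require('./protos/protos.js');
const GenericWebService =
  google.cloud.dialogflow.cx.v3beta1.Webhook.GenericWebService;

// Placeholder bytes standing in for a real DER certificate.
const der = Buffer.from([0x30, 0x82, 0x01, 0x0a]);

// fromObject decodes base64 strings into buffers for bytes fields.
const msg = GenericWebService.fromObject({
  uri: 'https://www.example.com/webhook',
  allowedCaCerts: [der.toString('base64')],
});

// With {bytes: String}, toObject re-encodes each certificate as base64.
console.log(GenericWebService.toObject(msg, {bytes: String}).allowedCaCerts);
```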