This repository has been archived by the owner on Jul 20, 2023. It is now read-only.

Commit

feat: added API for changelogs
docs: clarified semantics of the streaming APIs (#198)

PiperOrigin-RevId: 404659561
Source-Link: googleapis/googleapis@19943c1
Source-Link: https://github.com/googleapis/googleapis-gen/commit/71bad2d4d5d3cf22e2e3cc0890fe30c8f4b0c148
Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNzFiYWQyZDRkNWQzY2YyMmUyZTNjYzA4OTBmZTMwYzhmNGIwYzE0OCJ9
gcf-owl-bot[bot] authored Oct 21, 2021
1 parent b2ae237 commit 9882149
Showing 117 changed files with 61,574 additions and 43,133 deletions.
50 changes: 25 additions & 25 deletions protos/google/cloud/dialogflow/cx/v3/audio_config.proto
@@ -31,31 +31,6 @@ option java_package = "com.google.cloud.dialogflow.cx.v3";
option objc_class_prefix = "DF";
option ruby_package = "Google::Cloud::Dialogflow::CX::V3";

// Information for a word recognized by the speech recognizer.
message SpeechWordInfo {
// The word this info is for.
string word = 3;

// Time offset relative to the beginning of the audio that corresponds to the
// start of the spoken word. This is an experimental feature and the accuracy
// of the time offset can vary.
google.protobuf.Duration start_offset = 1;

// Time offset relative to the beginning of the audio that corresponds to the
// end of the spoken word. This is an experimental feature and the accuracy of
// the time offset can vary.
google.protobuf.Duration end_offset = 2;

// The Speech confidence between 0.0 and 1.0 for this word. A higher number
// indicates an estimated greater likelihood that the recognized word is
// correct. The default of 0.0 is a sentinel value indicating that confidence
// was not set.
//
// This field is not guaranteed to be fully stable over time for the same
// audio input. Users should also not rely on it to always be provided.
float confidence = 4;
}

// Audio encoding of the audio content sent in the conversational query request.
// Refer to the
// [Cloud Speech API
@@ -105,6 +80,31 @@ enum AudioEncoding {
AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE = 7;
}

// Information for a word recognized by the speech recognizer.
message SpeechWordInfo {
// The word this info is for.
string word = 3;

// Time offset relative to the beginning of the audio that corresponds to the
// start of the spoken word. This is an experimental feature and the accuracy
// of the time offset can vary.
google.protobuf.Duration start_offset = 1;

// Time offset relative to the beginning of the audio that corresponds to the
// end of the spoken word. This is an experimental feature and the accuracy of
// the time offset can vary.
google.protobuf.Duration end_offset = 2;

// The Speech confidence between 0.0 and 1.0 for this word. A higher number
// indicates an estimated greater likelihood that the recognized word is
// correct. The default of 0.0 is a sentinel value indicating that confidence
// was not set.
//
// This field is not guaranteed to be fully stable over time for the same
// audio input. Users should also not rely on it to always be provided.
float confidence = 4;
}

// Instructs the speech recognizer on how to process the audio content.
message InputAudioConfig {
// Required. Audio encoding of the audio content to process.
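The `SpeechWordInfo` message itself is unchanged by this diff; it was only moved below `AudioEncoding`. For readers consuming these values from the generated Node.js client, here is a minimal TypeScript sketch of interpreting a `SpeechWordInfo`, with the types declared locally for illustration. It assumes word info was requested via `InputAudioConfig.enable_word_info` and that the offsets arrive as protobuf `Duration` objects in camelCase.

```ts
// Local stand-ins for the proto shapes above; the generated client exposes
// equivalent interfaces under its protos namespace.
interface Duration {
  seconds?: number | string;
  nanos?: number;
}

interface SpeechWordInfo {
  word?: string;
  startOffset?: Duration;
  endOffset?: Duration;
  confidence?: number;
}

// Convert a protobuf Duration to milliseconds.
function toMillis(d?: Duration): number {
  return Number(d?.seconds ?? 0) * 1000 + (d?.nanos ?? 0) / 1e6;
}

function describeWord(info: SpeechWordInfo): string {
  const start = toMillis(info.startOffset).toFixed(0);
  const end = toMillis(info.endOffset).toFixed(0);
  // Per the comment above, 0.0 is a sentinel meaning "confidence not set".
  const conf =
    info.confidence && info.confidence > 0
      ? info.confidence.toFixed(2)
      : 'unset';
  return `"${info.word}" [${start}ms-${end}ms] confidence=${conf}`;
}

console.log(
  describeWord({
    word: 'tube',
    startOffset: {seconds: 0, nanos: 120_000_000},
    endOffset: {seconds: 0, nanos: 480_000_000},
    confidence: 0.92,
  })
); // "tube" [120ms-480ms] confidence=0.92
```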
154 changes: 154 additions & 0 deletions protos/google/cloud/dialogflow/cx/v3/changelog.proto
@@ -0,0 +1,154 @@
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.dialogflow.cx.v3;

import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/timestamp.proto";

option cc_enable_arenas = true;
option csharp_namespace = "Google.Cloud.Dialogflow.Cx.V3";
option go_package = "google.golang.org/genproto/googleapis/cloud/dialogflow/cx/v3;cx";
option java_multiple_files = true;
option java_outer_classname = "ChangelogProto";
option java_package = "com.google.cloud.dialogflow.cx.v3";
option objc_class_prefix = "DF";
option ruby_package = "Google::Cloud::Dialogflow::CX::V3";

// Service for managing [Changelogs][google.cloud.dialogflow.cx.v3.Changelog].
service Changelogs {
option (google.api.default_host) = "dialogflow.googleapis.com";
option (google.api.oauth_scopes) =
"https://www.googleapis.com/auth/cloud-platform,"
"https://www.googleapis.com/auth/dialogflow";

// Returns the list of Changelogs.
rpc ListChangelogs(ListChangelogsRequest) returns (ListChangelogsResponse) {
option (google.api.http) = {
get: "/v3/{parent=projects/*/locations/*/agents/*}/changelogs"
};
option (google.api.method_signature) = "parent";
}

// Retrieves the specified Changelog.
rpc GetChangelog(GetChangelogRequest) returns (Changelog) {
option (google.api.http) = {
get: "/v3/{name=projects/*/locations/*/agents/*/changelogs/*}"
};
option (google.api.method_signature) = "name";
}
}
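Both RPCs are read-only and map to HTTP GET. A minimal sketch of calling them through the generated Node.js client published from this repository; the `ChangelogsClient` name and the `[response]` tuple shape follow standard GAPIC generation, and the project, agent, and changelog IDs below are placeholders.

```ts
import {ChangelogsClient} from '@google-cloud/dialogflow-cx';

async function main(): Promise<void> {
  const client = new ChangelogsClient();
  const parent = 'projects/my-project/locations/global/agents/my-agent';

  // ListChangelogs — GET /v3/{parent=projects/*/locations/*/agents/*}/changelogs
  const [changelogs] = await client.listChangelogs({parent});
  for (const changelog of changelogs) {
    console.log(changelog.name, changelog.action, changelog.type);
  }

  // GetChangelog — GET /v3/{name=projects/*/locations/*/agents/*/changelogs/*}
  const [changelog] = await client.getChangelog({
    name: `${parent}/changelogs/my-changelog-id`,
  });
  console.log(changelog.userEmail, changelog.resource);
}

main().catch(console.error);
```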

// The request message for [Changelogs.ListChangelogs][google.cloud.dialogflow.cx.v3.Changelogs.ListChangelogs].
message ListChangelogsRequest {
// Required. The agent containing the changelogs.
// Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>`.
string parent = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
child_type: "dialogflow.googleapis.com/Changelog"
}
];

// The filter string. Supports filtering by user_email, resource, type, and
// create_time. Some examples:
// 1. By user email:
// user_email = "someone@google.com"
// 2. By resource name:
// resource = "projects/123/locations/global/agents/456/flows/789"
// 3. By resource display name:
// display_name = "my agent"
// 4. By action:
// action = "Create"
// 5. By type:
// type = "flows"
// 6. By create time. Currently predicates on `create_time` and
// `create_time_epoch_seconds` are supported:
// create_time_epoch_seconds > 1551790877 AND create_time <=
// 2017-01-15T01:30:15.01Z
// 7. Combination of above filters:
// resource = "projects/123/locations/global/agents/456/flows/789"
// AND user_email = "someone@google.com"
// AND create_time <= 2017-01-15T01:30:15.01Z
string filter = 2;

// The maximum number of items to return in a single page. By default 100 and
// at most 1000.
int32 page_size = 3;

// The next_page_token value returned from a previous list request.
string page_token = 4;
}
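The filter predicates above combine with `AND`. A small sketch of a request object that reproduces example 7, using camelCase field names as they appear in the Node.js client; the parent value is a placeholder, and the object would be passed to `listChangelogs` as-is.

```ts
// Combined filter from example 7 in the comment above, plus an explicit
// page size (default 100, maximum 1000).
const request = {
  parent: 'projects/123/locations/global/agents/456',
  filter:
    'resource = "projects/123/locations/global/agents/456/flows/789"' +
    ' AND user_email = "someone@google.com"' +
    ' AND create_time <= 2017-01-15T01:30:15.01Z',
  pageSize: 100,
};
```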

// The response message for [Changelogs.ListChangelogs][google.cloud.dialogflow.cx.v3.Changelogs.ListChangelogs].
message ListChangelogsResponse {
// The list of changelogs. There will be a maximum number of items returned
// based on the page_size field in the request. The changelogs will be ordered
// by timestamp.
repeated Changelog changelogs = 1;

// Token to retrieve the next page of results, or empty if there are no more
// results in the list.
string next_page_token = 2;
}
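Rather than threading `next_page_token` back into `page_token` by hand, the generated client exposes an async iterator that does this internally (a standard GAPIC surface, assumed here):

```ts
import {ChangelogsClient} from '@google-cloud/dialogflow-cx';

async function listAllChangelogs(parent: string): Promise<void> {
  const client = new ChangelogsClient();
  // The iterator fetches further pages via next_page_token as needed.
  for await (const changelog of client.listChangelogsAsync({
    parent,
    pageSize: 1000,
  })) {
    console.log(changelog.name);
  }
}
```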

// The request message for [Changelogs.GetChangelog][google.cloud.dialogflow.cx.v3.Changelogs.GetChangelog].
message GetChangelogRequest {
// Required. The name of the changelog to get.
// Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
// ID>/changelogs/<Changelog ID>`.
string name = 1 [
(google.api.field_behavior) = REQUIRED,
(google.api.resource_reference) = {
type: "dialogflow.googleapis.com/Changelog"
}
];
}

// A changelog represents a change made to a given agent.
message Changelog {
option (google.api.resource) = {
type: "dialogflow.googleapis.com/Changelog"
pattern: "projects/{project}/locations/{location}/agents/{agent}/changelogs/{changelog}"
};

// The unique identifier of the changelog.
// Format: `projects/<Project ID>/locations/<Location ID>/agents/<Agent
// ID>/changelogs/<Changelog ID>`.
string name = 1;

// Email address of the authenticated user.
string user_email = 2;

// The affected resource display name of the change.
string display_name = 7;

// The action of the change.
string action = 11;

// The affected resource type.
string type = 8;

// The affected resource name of the change.
string resource = 3;

// The timestamp of the change.
google.protobuf.Timestamp create_time = 4;
}
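A short sketch of working with the `Changelog` resource declared above: building a name that matches the resource pattern via the generated path helper (a `changelogPath` helper is assumed to exist, as GAPIC normally generates one per resource), and converting the `create_time` Timestamp to a JavaScript `Date`.

```ts
import {ChangelogsClient} from '@google-cloud/dialogflow-cx';

const client = new ChangelogsClient();

// Build a name matching the pattern
// projects/{project}/locations/{location}/agents/{agent}/changelogs/{changelog}.
const name = client.changelogPath(
  'my-project',
  'global',
  'my-agent',
  'my-changelog'
);

// Convert a create_time Timestamp ({seconds, nanos}) to a JavaScript Date.
function timestampToDate(ts?: {seconds?: number | string; nanos?: number}): Date {
  return new Date(Number(ts?.seconds ?? 0) * 1000 + (ts?.nanos ?? 0) / 1e6);
}
```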
86 changes: 49 additions & 37 deletions protos/google/cloud/dialogflow/cx/v3/session.proto
@@ -201,7 +201,7 @@ message DetectIntentResponse {
// Multiple request messages should be sent in order:
//
// 1. The first message must contain
// [session][google.cloud.dialogflow.cx.v3.StreamingDetectIntentRequest.session],
// [query_input][google.cloud.dialogflow.cx.v3.StreamingDetectIntentRequest.query_input] plus optionally
// [query_params][google.cloud.dialogflow.cx.v3.StreamingDetectIntentRequest.query_params]. If the client
// wants to receive an audio response, it should also contain
@@ -261,21 +261,29 @@ message StreamingDetectIntentRequest {
bool enable_partial_response = 5;
}

// The top-level message returned from the `StreamingDetectIntent` method.
// The top-level message returned from the
// [StreamingDetectIntent][google.cloud.dialogflow.cx.v3.Sessions.StreamingDetectIntent] method.
//
// Multiple response messages can be returned in order:
// Multiple response messages (N) can be returned in order.
//
// 1. If the input was set to streaming audio, the first one or more messages
// contain `recognition_result`. Each `recognition_result` represents a more
// complete transcript of what the user said. The last `recognition_result`
// has `is_final` set to `true`.
// The first (N-1) responses set either the `recognition_result` or
// `detect_intent_response` field, depending on the request:
//
// 2. If `enable_partial_response` is true, the following N messages
// (currently 1 <= N <= 4) contain `detect_intent_response`. The first (N-1)
// `detect_intent_response`s will have `response_type` set to `PARTIAL`.
// The last `detect_intent_response` has `response_type` set to `FINAL`.
// If `enable_partial_response` is false, the response stream contains only
// the final `detect_intent_response`.
// * If the `StreamingDetectIntentRequest.query_input.audio` field was
// set, and the `StreamingDetectIntentRequest.enable_partial_response`
// field was false, the `recognition_result` field is populated for each
// of the (N-1) responses.
// See the [StreamingRecognitionResult][google.cloud.dialogflow.cx.v3.StreamingRecognitionResult] message for details
// about the result message sequence.
//
// * If the `StreamingDetectIntentRequest.enable_partial_response` field was
// true, the `detect_intent_response` field is populated for each
// of the (N-1) responses, where 1 <= N <= 4.
// These responses set the [DetectIntentResponse.response_type][google.cloud.dialogflow.cx.v3.DetectIntentResponse.response_type] field
// to `PARTIAL`.
//
// For the final Nth response message, the `detect_intent_response` is fully
// populated, and [DetectIntentResponse.response_type][google.cloud.dialogflow.cx.v3.DetectIntentResponse.response_type] is set to `FINAL`.
message StreamingDetectIntentResponse {
// The output response.
oneof response {
@@ -291,35 +299,39 @@ message StreamingDetectIntentResponse {
// that is currently being processed or an indication that this is the end
// of the single requested utterance.
//
// Example:
//
// 1. transcript: "tube"
//
// 2. transcript: "to be a"
//
// 3. transcript: "to be"
//
// 4. transcript: "to be or not to be"
// is_final: true
//
// 5. transcript: " that's"
//
// 6. transcript: " that is"
//
// 7. message_type: `END_OF_SINGLE_UTTERANCE`
// While end-user audio is being processed, Dialogflow sends a series of
// results. Each result may contain a `transcript` value. A transcript
// represents a portion of the utterance. While the recognizer is processing
// audio, transcript values may be interim values or finalized values.
// Once a transcript is finalized, the `is_final` value is set to true and
// processing continues for the next transcript.
//
// 8. transcript: " that is the question"
// is_final: true
// If `StreamingDetectIntentRequest.query_input.audio.config.single_utterance`
// was true, and the recognizer has completed processing audio,
// the `message_type` value is set to `END_OF_SINGLE_UTTERANCE` and the
// following (last) result contains the last finalized transcript.
//
// Only two of the responses contain final results (#4 and #8 indicated by
// `is_final: true`). Concatenating these generates the full transcript: "to be
// or not to be that is the question".
// The complete end-user utterance is determined by concatenating the
// finalized transcript values received for the series of results.
//
// In each response we populate:
// In the following example, single utterance is enabled. In the case where
// single utterance is not enabled, result 7 would not occur.
//
// * for `TRANSCRIPT`: `transcript` and possibly `is_final`.
// ```
// Num | transcript              | message_type            | is_final
// --- | ----------------------- | ----------------------- | --------
// 1   | "tube"                  | TRANSCRIPT              | false
// 2   | "to be a"               | TRANSCRIPT              | false
// 3   | "to be"                 | TRANSCRIPT              | false
// 4   | "to be or not to be"    | TRANSCRIPT              | true
// 5   | "that's"                | TRANSCRIPT              | false
// 6   | "that is"               | TRANSCRIPT              | false
// 7   | unset                   | END_OF_SINGLE_UTTERANCE | unset
// 8   | " that is the question" | TRANSCRIPT              | true
// ```
//
// * for `END_OF_SINGLE_UTTERANCE`: only `message_type`.
// Concatenating the finalized transcripts with `is_final` set to true,
// the complete utterance becomes "to be or not to be that is the question".
message StreamingRecognitionResult {
// Type of the response message.
enum MessageType {
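To make the clarified response ordering concrete, here is a hedged TypeScript sketch of a `StreamingDetectIntent` call that handles both response types: it accumulates finalized transcripts as described for `StreamingRecognitionResult`, then reacts to the final `detect_intent_response`. The bidi-streaming surface follows standard Node.js GAPIC conventions; audio capture and chunking are elided.

```ts
import {SessionsClient} from '@google-cloud/dialogflow-cx';

function streamIntent(session: string, audioChunks: Buffer[]): void {
  const client = new SessionsClient();
  const stream = client.streamingDetectIntent();
  const finalTranscripts: string[] = [];

  stream.on('data', (response: any) => {
    if (response.recognitionResult) {
      // Interim responses: collect transcripts flagged is_final.
      const r = response.recognitionResult;
      if (r.isFinal && r.transcript) {
        finalTranscripts.push(r.transcript);
      }
    } else if (response.detectIntentResponse) {
      // With partial responses enabled, response_type is PARTIAL for the
      // first (N-1) of these and FINAL for the last (Nth) one.
      if (response.detectIntentResponse.responseType === 'FINAL') {
        console.log('full utterance:', finalTranscripts.join(''));
      }
    }
  });

  // First request: session and query_input (query_params optional).
  stream.write({
    session,
    queryInput: {
      audio: {
        config: {
          audioEncoding: 'AUDIO_ENCODING_LINEAR_16',
          sampleRateHertz: 16000,
        },
      },
      languageCode: 'en',
    },
  });
  // Subsequent requests carry only the audio content.
  for (const chunk of audioChunks) {
    stream.write({queryInput: {audio: {audio: chunk}}});
  }
  stream.end();
}
```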
