Commit 21c7422
docs: update comments on parameters and validation result (#729)
* docs: update comments on parameters and validation result.

PiperOrigin-RevId: 348673154

Source-Author: Google APIs <noreply@google.com>
Source-Date: Tue Dec 22 12:11:37 2020 -0800
Source-Repo: googleapis/googleapis
Source-Sha: 0795e3f854056696f330454023b9fa6d35053b79
Source-Link: googleapis/googleapis@0795e3f

* docs: update comments on parameters and validation result.

PiperOrigin-RevId: 348696929

Source-Author: Google APIs <noreply@google.com>
Source-Date: Tue Dec 22 14:46:59 2020 -0800
Source-Repo: googleapis/googleapis
Source-Sha: 8a6f4d9acb1620af2156b42b37b54eae257b7cad
Source-Link: googleapis/googleapis@8a6f4d9
yoshi-automation authored Jan 7, 2021
1 parent 2b1e8e3 commit 21c7422
Showing 13 changed files with 223 additions and 233 deletions.
google/cloud/dialogflow/v2/audio_config.proto
@@ -29,36 +29,6 @@ option java_outer_classname = "AudioConfigProto";
option java_package = "com.google.cloud.dialogflow.v2";
option objc_class_prefix = "DF";

// Hints for the speech recognizer to help with recognition in a specific
// conversation state.
message SpeechContext {
// Optional. A list of strings containing words and phrases that the speech
// recognizer should recognize with higher likelihood.
//
// This list can be used to:
//
// * improve accuracy for words and phrases you expect the user to say,
// e.g. typical commands for your Dialogflow agent
// * add additional words to the speech recognizer vocabulary
// * ...
//
// See the [Cloud Speech
// documentation](https://cloud.google.com/speech-to-text/quotas) for usage
// limits.
repeated string phrases = 1;

// Optional. Boost for this context compared to other contexts:
//
// * If the boost is positive, Dialogflow will increase the probability that
// the phrases in this context are recognized over similar sounding phrases.
// * If the boost is unspecified or non-positive, Dialogflow will not apply
// any boost.
//
// Dialogflow recommends that you use boosts in the range (0, 20] and that you
// find a value that fits your use case with binary search.
float boost = 2;
}

// Audio encoding of the audio content sent in the conversational query request.
// Refer to the
// [Cloud Speech API
@@ -108,6 +78,36 @@ enum AudioEncoding {
AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE = 7;
}

// Hints for the speech recognizer to help with recognition in a specific
// conversation state.
message SpeechContext {
// Optional. A list of strings containing words and phrases that the speech
// recognizer should recognize with higher likelihood.
//
// This list can be used to:
//
// * improve accuracy for words and phrases you expect the user to say,
// e.g. typical commands for your Dialogflow agent
// * add additional words to the speech recognizer vocabulary
// * ...
//
// See the [Cloud Speech
// documentation](https://cloud.google.com/speech-to-text/quotas) for usage
// limits.
repeated string phrases = 1;

// Optional. Boost for this context compared to other contexts:
//
// * If the boost is positive, Dialogflow will increase the probability that
// the phrases in this context are recognized over similar sounding phrases.
// * If the boost is unspecified or non-positive, Dialogflow will not apply
// any boost.
//
// Dialogflow recommends that you use boosts in the range (0, 20] and that you
// find a value that fits your use case with binary search.
float boost = 2;
}

// Information for a word recognized by the speech recognizer.
message SpeechWordInfo {
// The word this info is for.
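
Since the SpeechContext message itself is unchanged here (only moved below the AudioEncoding enum), usage stays the same. A minimal Python sketch of supplying these hints through InputAudioConfig, assuming the google-cloud-dialogflow client library; the phrases and boost value are illustrative only:

    from google.cloud import dialogflow_v2

    # Phrase hints the recognizer should favor; the comment above
    # recommends boosts in (0, 20], tuned by binary search.
    speech_context = dialogflow_v2.SpeechContext(
        phrases=["book a table", "talk to an agent"],
        boost=10.0,
    )

    audio_config = dialogflow_v2.InputAudioConfig(
        audio_encoding=dialogflow_v2.AudioEncoding.AUDIO_ENCODING_LINEAR_16,
        sample_rate_hertz=16000,
        language_code="en-US",
        speech_contexts=[speech_context],  # repeated SpeechContext field
    )
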
google/cloud/dialogflow/v2/context.proto
@@ -169,7 +169,8 @@ message Context {
// - MapKey value: parameter name
// - MapValue type:
// - If parameter's entity type is a composite entity: map
// - Else: string or number, depending on parameter value type
// - Else: depending on parameter value type, could be one of string,
// number, boolean, null, list or map
// - MapValue value:
// - If parameter's entity type is a composite entity:
// map from composite entity property names to property values
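
The parameter layout documented above arrives as a google.protobuf.Struct, so each MapValue is a Struct Value whose "kind" oneof distinguishes string, number, boolean, null, list, and map. A short Python sketch of walking such a struct; describe_parameters is a hypothetical helper, not part of the client library:

    from google.protobuf import struct_pb2

    def describe_parameters(parameters: struct_pb2.Struct) -> None:
        # MapKey: parameter name. MapValue: string, number, boolean,
        # null, list, or map, as the updated comment spells out.
        for name, value in parameters.fields.items():
            kind = value.WhichOneof("kind")
            if kind == "struct_value":
                # Composite entity: map from property names to values.
                print(name, "->", dict(value.struct_value.fields))
            else:
                print(name, "->", kind)
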
google/cloud/dialogflow/v2/session.proto
@@ -80,8 +80,8 @@ service Sessions {
// Note: Always use agent versions for production traffic.
// See [Versions and
// environments](https://cloud.google.com/dialogflow/es/docs/agents-versions).
rpc StreamingDetectIntent(stream StreamingDetectIntentRequest)
returns (stream StreamingDetectIntentResponse) {}
rpc StreamingDetectIntent(stream StreamingDetectIntentRequest) returns (stream StreamingDetectIntentResponse) {
}
}

// The request to detect user's intent.
@@ -127,14 +127,12 @@ message DetectIntentRequest {
// configured, no output audio is generated.
OutputAudioConfig output_audio_config = 4;

// Mask for
// [output_audio_config][google.cloud.dialogflow.v2.DetectIntentRequest.output_audio_config]
// indicating which settings in this request-level config should override
// speech synthesizer settings defined at agent-level.
// Mask for [output_audio_config][google.cloud.dialogflow.v2.DetectIntentRequest.output_audio_config] indicating which settings in this
// request-level config should override speech synthesizer settings defined at
// agent-level.
//
// If unspecified or empty,
// [output_audio_config][google.cloud.dialogflow.v2.DetectIntentRequest.output_audio_config]
// replaces the agent-level config in its entirety.
// If unspecified or empty, [output_audio_config][google.cloud.dialogflow.v2.DetectIntentRequest.output_audio_config] replaces the agent-level
// config in its entirety.
google.protobuf.FieldMask output_audio_config_mask = 7;

// The natural language speech audio to be processed. This field
@@ -284,7 +282,8 @@ message QueryResult {
// - MapKey value: parameter name
// - MapValue type:
// - If parameter's entity type is a composite entity: map
// - Else: string or number, depending on parameter value type
// - Else: depending on parameter value type, could be one of string,
// number, boolean, null, list or map
// - MapValue value:
// - If parameter's entity type is a composite entity:
// map from composite entity property names to property values
@@ -350,29 +349,25 @@ message QueryResult {
}

// The top-level message sent by the client to the
// [Sessions.StreamingDetectIntent][google.cloud.dialogflow.v2.Sessions.StreamingDetectIntent]
// method.
// [Sessions.StreamingDetectIntent][google.cloud.dialogflow.v2.Sessions.StreamingDetectIntent] method.
//
// Multiple request messages should be sent in order:
//
// 1. The first message must contain
// [session][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.session],
// [query_input][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.query_input]
// plus optionally
// [query_params][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.query_params].
// If the client wants to receive an audio response, it should also contain
// [query_input][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.query_input] plus optionally
// [query_params][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.query_params]. If the client
// wants to receive an audio response, it should also contain
// [output_audio_config][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.output_audio_config].
// The message must not contain
// [input_audio][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.input_audio].
// 2. If
// [query_input][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.query_input]
// was set to
// [query_input.audio_config][google.cloud.dialogflow.v2.InputAudioConfig],
// all subsequent messages must contain
// [input_audio][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.input_audio]
// to continue with Speech recognition. If you decide to rather detect an
// intent from text input after you already started Speech recognition,
// please send a message with
// 2. If [query_input][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.query_input] was set to
// [query_input.audio_config][google.cloud.dialogflow.v2.InputAudioConfig], all subsequent
// messages must contain
// [input_audio][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.input_audio] to continue with
// Speech recognition.
// If you decide to rather detect an intent from text input after you
// already started Speech recognition, please send a message with
// [query_input.text][google.cloud.dialogflow.v2.QueryInput.text].
//
// However, note that:
@@ -421,30 +416,27 @@ message StreamingDetectIntentRequest {
// 3. an event that specifies which intent to trigger.
QueryInput query_input = 3 [(google.api.field_behavior) = REQUIRED];

// Please use
// [InputAudioConfig.single_utterance][google.cloud.dialogflow.v2.InputAudioConfig.single_utterance]
// instead. If `false` (default), recognition does not cease until the client
// closes the stream. If `true`, the recognizer will detect a single spoken
// utterance in input audio. Recognition ceases when it detects the audio's
// voice has stopped or paused. In this case, once a detected intent is
// received, the client should close the stream and start a new request with a
// new stream as needed. This setting is ignored when `query_input` is a piece
// of text or an event.
// Please use [InputAudioConfig.single_utterance][google.cloud.dialogflow.v2.InputAudioConfig.single_utterance] instead.
// If `false` (default), recognition does not cease until
// the client closes the stream. If `true`, the recognizer will detect a
// single spoken utterance in input audio. Recognition ceases when it detects
// the audio's voice has stopped or paused. In this case, once a detected
// intent is received, the client should close the stream and start a new
// request with a new stream as needed.
// This setting is ignored when `query_input` is a piece of text or an event.
bool single_utterance = 4 [deprecated = true];

// Instructs the speech synthesizer how to generate the output
// audio. If this field is not set and agent-level speech synthesizer is not
// configured, no output audio is generated.
OutputAudioConfig output_audio_config = 5;

// Mask for
// [output_audio_config][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.output_audio_config]
// indicating which settings in this request-level config should override
// speech synthesizer settings defined at agent-level.
// Mask for [output_audio_config][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.output_audio_config] indicating which settings in this
// request-level config should override speech synthesizer settings defined at
// agent-level.
//
// If unspecified or empty,
// [output_audio_config][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.output_audio_config]
// replaces the agent-level config in its entirety.
// If unspecified or empty, [output_audio_config][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.output_audio_config] replaces the agent-level
// config in its entirety.
google.protobuf.FieldMask output_audio_config_mask = 7;

// The input audio content to be recognized. Must be sent if
@@ -539,12 +531,11 @@ message StreamingRecognitionResult {

// Event indicates that the server has detected the end of the user's speech
// utterance and expects no additional inputs.
// Therefore, the server will not process additional audio (although it may
// subsequently return additional results). The client should stop sending
// additional audio data, half-close the gRPC connection, and wait for any
// additional results until the server closes the gRPC connection. This
// message is only sent if `single_utterance` was set to `true`, and is not
// used otherwise.
// Therefore, the server will not process additional audio (although it may subsequently return additional results). The
// client should stop sending additional audio data, half-close the gRPC
// connection, and wait for any additional results until the server closes
// the gRPC connection. This message is only sent if `single_utterance` was
// set to `true`, and is not used otherwise.
END_OF_SINGLE_UTTERANCE = 2;
}
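
Taken together, the request-ordering rules above and the END_OF_SINGLE_UTTERANCE event suggest the following client loop. A hedged Python sketch assuming the google-cloud-dialogflow client; the session path, file name, and chunk size are placeholders:

    from google.cloud import dialogflow_v2

    def stream_user_audio(session: str, audio_path: str) -> None:
        client = dialogflow_v2.SessionsClient()

        def requests():
            # 1. First message: session and query_input only, no input_audio.
            config = dialogflow_v2.InputAudioConfig(
                audio_encoding=dialogflow_v2.AudioEncoding.AUDIO_ENCODING_LINEAR_16,
                sample_rate_hertz=16000,
                language_code="en-US",
                single_utterance=True,  # preferred over the deprecated request field
            )
            yield dialogflow_v2.StreamingDetectIntentRequest(
                session=session,
                query_input=dialogflow_v2.QueryInput(audio_config=config),
            )
            # 2. Subsequent messages: input_audio chunks only.
            with open(audio_path, "rb") as f:
                while chunk := f.read(4096):
                    yield dialogflow_v2.StreamingDetectIntentRequest(input_audio=chunk)

        end_of_utterance = (
            dialogflow_v2.StreamingRecognitionResult.MessageType.END_OF_SINGLE_UTTERANCE
        )
        for response in client.streaming_detect_intent(requests=requests()):
            if response.recognition_result.message_type == end_of_utterance:
                # Stop sending audio and keep reading until the server
                # closes the stream with the final result.
                continue
            if response.query_result.query_text:
                print("Matched intent:", response.query_result.intent.display_name)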

@@ -571,8 +562,7 @@ message StreamingRecognitionResult {
float confidence = 4;

// Word-specific information for the words recognized by Speech in
// [transcript][google.cloud.dialogflow.v2.StreamingRecognitionResult.transcript].
// Populated if and only if `message_type` = `TRANSCRIPT` and
// [transcript][google.cloud.dialogflow.v2.StreamingRecognitionResult.transcript]. Populated if and only if `message_type` = `TRANSCRIPT` and
// [InputAudioConfig.enable_word_info] is set.
repeated SpeechWordInfo speech_word_info = 7;

@@ -613,7 +603,8 @@ message EventInput {
// - MapKey value: parameter name
// - MapValue type:
// - If parameter's entity type is a composite entity: map
// - Else: string or number, depending on parameter value type
// - Else: depending on parameter value type, could be one of string,
// number, boolean, null, list or map
// - MapValue value:
// - If parameter's entity type is a composite entity:
// map from composite entity property names to property values
@@ -639,14 +630,11 @@ message SentimentAnalysisRequestConfig {
// and identifies the prevailing subjective opinion, especially to determine a
// user's attitude as positive, negative, or neutral.
// For [Participants.DetectIntent][], it needs to be configured in
// [DetectIntentRequest.query_params][google.cloud.dialogflow.v2.DetectIntentRequest.query_params].
// For [Participants.StreamingDetectIntent][], it needs to be configured in
// [DetectIntentRequest.query_params][google.cloud.dialogflow.v2.DetectIntentRequest.query_params]. For
// [Participants.StreamingDetectIntent][], it needs to be configured in
// [StreamingDetectIntentRequest.query_params][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.query_params].
// And for
// [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent]
// and
// [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2.Participants.StreamingAnalyzeContent],
// it needs to be configured in
// And for [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent] and
// [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2.Participants.StreamingAnalyzeContent], it needs to be configured in
// [ConversationProfile.human_agent_assistant_config][google.cloud.dialogflow.v2.ConversationProfile.human_agent_assistant_config]
message SentimentAnalysisResult {
// The sentiment analysis result for `query_text`.
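
As the comment above says, sentiment analysis for DetectIntent is enabled through query_params on the request. A minimal Python sketch, assuming the v2 client; the session path is a placeholder:

    from google.cloud import dialogflow_v2

    def detect_intent_with_sentiment(session: str, text: str) -> None:
        client = dialogflow_v2.SessionsClient()
        response = client.detect_intent(
            request={
                "session": session,
                "query_input": dialogflow_v2.QueryInput(
                    text=dialogflow_v2.TextInput(text=text, language_code="en-US")
                ),
                "query_params": dialogflow_v2.QueryParameters(
                    sentiment_analysis_request_config=dialogflow_v2.SentimentAnalysisRequestConfig(
                        analyze_query_text_sentiment=True
                    )
                ),
            }
        )
        sentiment = response.query_result.sentiment_analysis_result.query_text_sentiment
        print(f"score={sentiment.score:+.2f} magnitude={sentiment.magnitude:.2f}")
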
google/cloud/dialogflow/v2/validation_result.proto
@@ -33,7 +33,7 @@ message ValidationError {
// Not specified. This value should never be used.
SEVERITY_UNSPECIFIED = 0;

// The agent doesn't follow Dialogflow best practicies.
// The agent doesn't follow Dialogflow best practices.
INFO = 1;

// The agent may not behave as expected.
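
The severity enum documented above surfaces through the agent validation API. A hedged Python sketch of listing validation errors; the project ID is a placeholder and the request shape is assumed from the v2 Agents service:

    from google.cloud import dialogflow_v2

    def print_validation_errors(project_id: str) -> None:
        client = dialogflow_v2.AgentsClient()
        result = client.get_validation_result(
            request={"parent": f"projects/{project_id}"}
        )
        for error in result.validation_errors:
            # Severity ranges from INFO (best-practice hints) upward.
            print(error.severity.name, "-", error.error_message)
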
google/cloud/dialogflow/v2beta1/audio_config.proto
@@ -30,36 +30,6 @@ option java_outer_classname = "AudioConfigProto";
option java_package = "com.google.cloud.dialogflow.v2beta1";
option objc_class_prefix = "DF";

// Hints for the speech recognizer to help with recognition in a specific
// conversation state.
message SpeechContext {
// Optional. A list of strings containing words and phrases that the speech
// recognizer should recognize with higher likelihood.
//
// This list can be used to:
//
// * improve accuracy for words and phrases you expect the user to say,
// e.g. typical commands for your Dialogflow agent
// * add additional words to the speech recognizer vocabulary
// * ...
//
// See the [Cloud Speech
// documentation](https://cloud.google.com/speech-to-text/quotas) for usage
// limits.
repeated string phrases = 1;

// Optional. Boost for this context compared to other contexts:
//
// * If the boost is positive, Dialogflow will increase the probability that
// the phrases in this context are recognized over similar sounding phrases.
// * If the boost is unspecified or non-positive, Dialogflow will not apply
// any boost.
//
// Dialogflow recommends that you use boosts in the range (0, 20] and that you
// find a value that fits your use case with binary search.
float boost = 2;
}

// Audio encoding of the audio content sent in the conversational query request.
// Refer to the
// [Cloud Speech API
@@ -109,6 +79,36 @@ enum AudioEncoding {
AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE = 7;
}

// Hints for the speech recognizer to help with recognition in a specific
// conversation state.
message SpeechContext {
// Optional. A list of strings containing words and phrases that the speech
// recognizer should recognize with higher likelihood.
//
// This list can be used to:
//
// * improve accuracy for words and phrases you expect the user to say,
// e.g. typical commands for your Dialogflow agent
// * add additional words to the speech recognizer vocabulary
// * ...
//
// See the [Cloud Speech
// documentation](https://cloud.google.com/speech-to-text/quotas) for usage
// limits.
repeated string phrases = 1;

// Optional. Boost for this context compared to other contexts:
//
// * If the boost is positive, Dialogflow will increase the probability that
// the phrases in this context are recognized over similar sounding phrases.
// * If the boost is unspecified or non-positive, Dialogflow will not apply
// any boost.
//
// Dialogflow recommends that you use boosts in the range (0, 20] and that you
// find a value that fits your use case with binary search.
float boost = 2;
}

// Information for a word recognized by the speech recognizer.
message SpeechWordInfo {
// The word this info is for.
google/cloud/dialogflow/v2beta1/context.proto
@@ -217,7 +217,8 @@ message Context {
// - MapKey value: parameter name
// - MapValue type:
// - If parameter's entity type is a composite entity: map
// - Else: string or number, depending on parameter value type
// - Else: depending on parameter value type, could be one of string,
// number, boolean, null, list or map
// - MapValue value:
// - If parameter's entity type is a composite entity:
// map from composite entity property names to property values
google/cloud/dialogflow/v2beta1/session.proto
@@ -321,7 +321,8 @@ message QueryResult {
// - MapKey value: parameter name
// - MapValue type:
// - If parameter's entity type is a composite entity: map
// - Else: string or number, depending on parameter value type
// - Else: depending on parameter value type, could be one of string,
// number, boolean, null, list or map
// - MapValue value:
// - If parameter's entity type is a composite entity:
// map from composite entity property names to property values
@@ -740,7 +741,8 @@ message EventInput {
// - MapKey value: parameter name
// - MapValue type:
// - If parameter's entity type is a composite entity: map
// - Else: string or number, depending on parameter value type
// - Else: depending on parameter value type, could be one of string,
// number, boolean, null, list or map
// - MapValue value:
// - If parameter's entity type is a composite entity:
// map from composite entity property names to property values
google/cloud/dialogflow/v2beta1/validation_result.proto
@@ -33,7 +33,7 @@ message ValidationError {
// Not specified. This value should never be used.
SEVERITY_UNSPECIFIED = 0;

// The agent doesn't follow Dialogflow best practicies.
// The agent doesn't follow Dialogflow best practices.
INFO = 1;

// The agent may not behave as expected.