// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.dialogflow.v2;

import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";
import "google/api/resource.proto";
import "google/cloud/dialogflow/v2/audio_config.proto";
import "google/cloud/dialogflow/v2/session.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/field_mask.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/timestamp.proto";
import "google/rpc/status.proto";

option cc_enable_arenas = true;
option csharp_namespace = "Google.Cloud.Dialogflow.V2";
option go_package = "google.golang.org/genproto/googleapis/cloud/dialogflow/v2;dialogflow";
option java_multiple_files = true;
option java_outer_classname = "ParticipantProto";
option java_package = "com.google.cloud.dialogflow.v2";
option objc_class_prefix = "DF";

// Service for managing [Participants][google.cloud.dialogflow.v2.Participant].
service Participants {
  option (google.api.default_host) = "dialogflow.googleapis.com";
  option (google.api.oauth_scopes) =
      "https://www.googleapis.com/auth/cloud-platform,"
      "https://www.googleapis.com/auth/dialogflow";

  // Creates a new participant in a conversation.
  rpc CreateParticipant(CreateParticipantRequest) returns (Participant) {
    option (google.api.http) = {
      post: "/v2/{parent=projects/*/conversations/*}/participants"
      body: "participant"
      additional_bindings {
        post: "/v2/{parent=projects/*/locations/*/conversations/*}/participants"
        body: "participant"
      }
    };
    option (google.api.method_signature) = "parent,participant";
  }

  // Retrieves a conversation participant.
  rpc GetParticipant(GetParticipantRequest) returns (Participant) {
    option (google.api.http) = {
      get: "/v2/{name=projects/*/conversations/*/participants/*}"
      additional_bindings {
        get: "/v2/{name=projects/*/locations/*/conversations/*/participants/*}"
      }
    };
    option (google.api.method_signature) = "name";
  }

  // Returns the list of all participants in the specified conversation.
  rpc ListParticipants(ListParticipantsRequest)
      returns (ListParticipantsResponse) {
    option (google.api.http) = {
      get: "/v2/{parent=projects/*/conversations/*}/participants"
      additional_bindings {
        get: "/v2/{parent=projects/*/locations/*/conversations/*}/participants"
      }
    };
    option (google.api.method_signature) = "parent";
  }

  // Updates the specified participant.
  rpc UpdateParticipant(UpdateParticipantRequest) returns (Participant) {
    option (google.api.http) = {
      patch: "/v2/{participant.name=projects/*/conversations/*/participants/*}"
      body: "participant"
      additional_bindings {
        patch: "/v2/{participant.name=projects/*/locations/*/conversations/*/participants/*}"
        body: "participant"
      }
    };
    option (google.api.method_signature) = "participant,update_mask";
  }
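
  // --- Non-normative usage sketch (not part of the API definition) ---
  // Creating and listing participants with the generated Python client,
  // assuming the `google-cloud-dialogflow` package; resource names below are
  // illustrative.
  //
  //   from google.cloud import dialogflow_v2
  //
  //   client = dialogflow_v2.ParticipantsClient()
  //   parent = "projects/my-project/conversations/my-conversation"
  //   # `role` must be set at creation time and is immutable afterwards.
  //   participant = dialogflow_v2.Participant(
  //       role=dialogflow_v2.Participant.Role.END_USER)
  //   created = client.create_participant(parent=parent, participant=participant)
  //   for p in client.list_participants(parent=parent):
  //       print(p.name, p.role)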

  // Adds a text (chat, for example) or audio (phone recording, for example)
  // message from a participant into the conversation.
  //
  // Note: Always use agent versions for production traffic
  // sent to virtual agents. See [Versions and
  // environments](https://cloud.google.com/dialogflow/es/docs/agents-versions).
  rpc AnalyzeContent(AnalyzeContentRequest) returns (AnalyzeContentResponse) {
    option (google.api.http) = {
      post: "/v2/{participant=projects/*/conversations/*/participants/*}:analyzeContent"
      body: "*"
      additional_bindings {
        post: "/v2/{participant=projects/*/locations/*/conversations/*/participants/*}:analyzeContent"
        body: "*"
      }
    };
    option (google.api.method_signature) = "participant,text_input";
    option (google.api.method_signature) = "participant,audio_input";
    option (google.api.method_signature) = "participant,event_input";
  }

  // Adds a text (chat, for example) or audio (phone recording, for example)
  // message from a participant into the conversation.
  // Note: This method is only available through the gRPC API (not REST).
  //
  // The top-level message sent to the client by the server is
  // `StreamingAnalyzeContentResponse`. Multiple response messages can be
  // returned in order. The first one or more messages contain the
  // `recognition_result` field. Each result represents a more complete
  // transcript of what the user said. The next message contains the
  // `reply_text` field and potentially the `reply_audio` field. The message
  // can also contain the `automated_agent_reply` field.
  //
  // Note: Always use agent versions for production traffic
  // sent to virtual agents. See [Versions and
  // environments](https://cloud.google.com/dialogflow/es/docs/agents-versions).
  rpc StreamingAnalyzeContent(stream StreamingAnalyzeContentRequest)
      returns (stream StreamingAnalyzeContentResponse) {}

  // Gets suggested articles for a participant based on specific historical
  // messages.
  rpc SuggestArticles(SuggestArticlesRequest)
      returns (SuggestArticlesResponse) {
    option (google.api.http) = {
      post: "/v2/{parent=projects/*/conversations/*/participants/*}/suggestions:suggestArticles"
      body: "*"
      additional_bindings {
        post: "/v2/{parent=projects/*/locations/*/conversations/*/participants/*}/suggestions:suggestArticles"
        body: "*"
      }
    };
    option (google.api.method_signature) = "parent";
  }

  // Gets suggested FAQ answers for a participant based on specific historical
  // messages.
  rpc SuggestFaqAnswers(SuggestFaqAnswersRequest)
      returns (SuggestFaqAnswersResponse) {
    option (google.api.http) = {
      post: "/v2/{parent=projects/*/conversations/*/participants/*}/suggestions:suggestFaqAnswers"
      body: "*"
      additional_bindings {
        post: "/v2/{parent=projects/*/locations/*/conversations/*/participants/*}/suggestions:suggestFaqAnswers"
        body: "*"
      }
    };
    option (google.api.method_signature) = "parent";
  }
}
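
// --- Non-normative usage sketch (not part of the API definition) ---
// Sending one end-user text message through AnalyzeContent, assuming the
// generated Python client (`google-cloud-dialogflow`); names are illustrative.
//
//   from google.cloud import dialogflow_v2
//
//   client = dialogflow_v2.ParticipantsClient()
//   response = client.analyze_content(
//       participant="projects/my-project/conversations/my-conversation"
//                   "/participants/my-participant",
//       text_input=dialogflow_v2.TextInput(
//           text="I need help with my order", language_code="en-US"),
//   )
//   print(response.reply_text)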

// Represents a conversation participant (human agent, virtual agent,
// end-user).
message Participant {
  option (google.api.resource) = {
    type: "dialogflow.googleapis.com/Participant"
    pattern: "projects/{project}/conversations/{conversation}/participants/{participant}"
    pattern: "projects/{project}/locations/{location}/conversations/{conversation}/participants/{participant}"
  };

  // Enumeration of the roles a participant can play in a conversation.
  enum Role {
    // Participant role not set.
    ROLE_UNSPECIFIED = 0;

    // Participant is a human agent.
    HUMAN_AGENT = 1;

    // Participant is an automated agent, such as a Dialogflow agent.
    AUTOMATED_AGENT = 2;

    // Participant is an end user that has called or chatted with
    // Dialogflow services.
    END_USER = 3;
  }

  // Optional. The unique identifier of this participant.
  // Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
  string name = 1 [(google.api.field_behavior) = OPTIONAL];

  // Immutable. The role this participant plays in the conversation. This
  // field must be set during participant creation and is then immutable.
  Role role = 2 [(google.api.field_behavior) = IMMUTABLE];

  // Optional. Label applied to streams representing this participant in SIPREC
  // XML metadata and SDP. This is used to assign transcriptions from that
  // media stream to this participant. This field can be updated.
  string sip_recording_media_label = 6 [(google.api.field_behavior) = OPTIONAL];
}

// Represents a message posted into a conversation.
message Message {
  option (google.api.resource) = {
    type: "dialogflow.googleapis.com/Message"
    pattern: "projects/{project}/conversations/{conversation}/messages/{message}"
    pattern: "projects/{project}/locations/{location}/conversations/{conversation}/messages/{message}"
  };

  // The unique identifier of the message.
  // Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/messages/<Message ID>`.
  string name = 1;

  // Required. The message content.
  string content = 2 [(google.api.field_behavior) = REQUIRED];

  // Optional. The message language.
  // This should be a [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
  // language tag. Example: "en-US".
  string language_code = 3 [(google.api.field_behavior) = OPTIONAL];

  // Output only. The participant that sends this message.
  string participant = 4 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The role of the participant.
  Participant.Role participant_role = 5 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The time when the message was created.
  google.protobuf.Timestamp create_time = 6 [(google.api.field_behavior) = OUTPUT_ONLY];

  // Output only. The annotation for the message.
  MessageAnnotation message_annotation = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
}
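
// --- Non-normative sketch (not part of the API definition) ---
// Generated clients typically expose helpers for the resource patterns
// above; assuming the Python client, a participant name can be built with:
//
//   from google.cloud import dialogflow_v2
//
//   name = dialogflow_v2.ParticipantsClient.participant_path(
//       "my-project", "my-conversation", "my-participant")
//   # -> "projects/my-project/conversations/my-conversation/participants/my-participant"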

// The request message for [Participants.CreateParticipant][google.cloud.dialogflow.v2.Participants.CreateParticipant].
message CreateParticipantRequest {
  // Required. Resource identifier of the conversation adding the participant.
  // Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>`.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      child_type: "dialogflow.googleapis.com/Participant"
    }
  ];

  // Required. The participant to create.
  Participant participant = 2 [(google.api.field_behavior) = REQUIRED];
}

// The request message for [Participants.GetParticipant][google.cloud.dialogflow.v2.Participants.GetParticipant].
message GetParticipantRequest {
  // Required. The name of the participant. Format:
  // `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
  string name = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Participant"
    }
  ];
}

// The request message for [Participants.ListParticipants][google.cloud.dialogflow.v2.Participants.ListParticipants].
message ListParticipantsRequest {
  // Required. The conversation to list all participants from.
  // Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>`.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      child_type: "dialogflow.googleapis.com/Participant"
    }
  ];

  // Optional. The maximum number of items to return in a single page. By
  // default 100 and at most 1000.
  int32 page_size = 2 [(google.api.field_behavior) = OPTIONAL];

  // Optional. The next_page_token value returned from a previous list request.
  string page_token = 3 [(google.api.field_behavior) = OPTIONAL];
}

// The response message for [Participants.ListParticipants][google.cloud.dialogflow.v2.Participants.ListParticipants].
message ListParticipantsResponse {
  // The list of participants. There is a maximum number of items
  // returned based on the page_size field in the request.
  repeated Participant participants = 1;

  // Token to retrieve the next page of results, or empty if there are no
  // more results in the list.
  string next_page_token = 2;
}

// The request message for [Participants.UpdateParticipant][google.cloud.dialogflow.v2.Participants.UpdateParticipant].
message UpdateParticipantRequest {
  // Required. The participant to update.
  Participant participant = 1 [(google.api.field_behavior) = REQUIRED];

  // Required. The mask to specify which fields to update.
  google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED];
}

// The request message for [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent].
message AnalyzeContentRequest {
  // Required. The name of the participant this text comes from.
  // Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
  string participant = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Participant"
    }
  ];

  // Required. The input content.
  oneof input {
    // The natural language text to be processed.
    TextInput text_input = 6;

    // The natural language speech audio to be processed.
    AudioInput audio_input = 7;

    // An input event to send to Dialogflow.
    EventInput event_input = 8;
  }

  // Speech synthesis configuration.
  // The speech synthesis settings for a virtual agent that may be configured
  // for the associated conversation profile are not used when calling
  // AnalyzeContent. If this configuration is not supplied, speech synthesis
  // is disabled.
  OutputAudioConfig reply_audio_config = 5;

  // Parameters for a Dialogflow virtual-agent query.
  QueryParameters query_params = 9;

  // A unique identifier for this request. Restricted to 36 ASCII characters.
  // A random UUID is recommended.
  // This request is only idempotent if a `request_id` is provided.
  string request_id = 11;
}
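
// --- Non-normative sketch (not part of the API definition) ---
// Exactly one member of the `input` oneof may be set per request. An audio
// request with an idempotency key, assuming the Python client, could look
// like this (file name and resource names are illustrative):
//
//   import uuid
//   from google.cloud import dialogflow_v2
//
//   request = dialogflow_v2.AnalyzeContentRequest(
//       participant="projects/my-project/conversations/my-conversation"
//                   "/participants/my-participant",
//       audio_input=dialogflow_v2.AudioInput(
//           config=dialogflow_v2.InputAudioConfig(
//               audio_encoding=dialogflow_v2.AudioEncoding.AUDIO_ENCODING_LINEAR_16,
//               sample_rate_hertz=16000,
//               language_code="en-US",
//           ),
//           audio=open("utterance.raw", "rb").read(),
//       ),
//       request_id=str(uuid.uuid4()),  # makes retries of this request idempotent
//   )
//   response = dialogflow_v2.ParticipantsClient().analyze_content(request=request)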

// The message in the response that indicates the parameters of DTMF.
message DtmfParameters {
  // Indicates whether DTMF input can be handled in the next request.
  bool accepts_dtmf_input = 1;
}

// The response message for [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent].
message AnalyzeContentResponse {
  // The output text content.
  // This field is set if the automated agent responded with text to show to
  // the user.
  string reply_text = 1;

  // The audio data bytes encoded as specified in the request.
  // This field is set if:
  //
  // - `reply_audio_config` was specified in the request, or
  // - The automated agent responded with audio to play to the user. In such
  //   case, `reply_audio.config` contains settings used to synthesize the
  //   speech.
  //
  // In some scenarios, multiple output audio fields may be present in the
  // response structure. In these cases, only the top-most-level audio output
  // has content.
  OutputAudio reply_audio = 2;

  // Only set if a Dialogflow automated agent has responded.
  // Note that: [AutomatedAgentReply.detect_intent_response.output_audio][]
  // and [AutomatedAgentReply.detect_intent_response.output_audio_config][]
  // are always empty, use
  // [reply_audio][google.cloud.dialogflow.v2.AnalyzeContentResponse.reply_audio] instead.
  AutomatedAgentReply automated_agent_reply = 3;

  // Message analyzed by CCAI.
  Message message = 5;

  // The suggestions for the most recent human agent. The order is the same as
  // [HumanAgentAssistantConfig.SuggestionConfig.feature_configs][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.SuggestionConfig.feature_configs] of
  // [HumanAgentAssistantConfig.human_agent_suggestion_config][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.human_agent_suggestion_config].
  repeated SuggestionResult human_agent_suggestion_results = 6;

  // The suggestions for the end user. The order is the same as
  // [HumanAgentAssistantConfig.SuggestionConfig.feature_configs][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.SuggestionConfig.feature_configs] of
  // [HumanAgentAssistantConfig.end_user_suggestion_config][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.end_user_suggestion_config].
  repeated SuggestionResult end_user_suggestion_results = 7;

  // Indicates the parameters of DTMF.
  DtmfParameters dtmf_parameters = 9;
}
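
// --- Non-normative sketch (not part of the API definition) ---
// Reading the text and audio replies from an AnalyzeContentResponse,
// assuming the Python client and a `response` obtained as sketched above:
//
//   if response.reply_text:
//       print("Agent reply:", response.reply_text)
//   # `reply_audio` is only populated when `reply_audio_config` was set in
//   # the request or the virtual agent answered with audio.
//   if response.reply_audio.audio:
//       with open("reply_audio.raw", "wb") as f:
//           f.write(response.reply_audio.audio)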

// The top-level message sent by the client to the
// [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2.Participants.StreamingAnalyzeContent] method.
//
// Multiple request messages should be sent in order:
//
// 1. The first message must contain
//    [participant][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.participant],
//    [config][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.config] and optionally
//    [query_params][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.query_params]. If you want
//    to receive an audio response, it should also contain
//    [reply_audio_config][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.reply_audio_config].
//    The message must not contain
//    [input][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.input].
//
// 2. If [config][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.config] in the first message
//    was set to [audio_config][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.audio_config],
//    all subsequent messages must contain
//    [input_audio][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.input_audio] to continue
//    with Speech recognition. If you decide to rather analyze text input
//    after you have already started Speech recognition, send a message with
//    [input_text][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.input_text].
//    However, note that:
//
//    * Dialogflow will bill you for the audio so far.
//    * Dialogflow discards all Speech recognition results in favor of the
//      text input.
//
// 3. If [StreamingAnalyzeContentRequest.config][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.config] in the first message was set
//    to [StreamingAnalyzeContentRequest.text_config][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.text_config], then the second message
//    must contain only [input_text][google.cloud.dialogflow.v2.StreamingAnalyzeContentRequest.input_text].
//    Moreover, you must not send more than two messages.
//
// After you have sent all input, you must half-close or abort the request
// stream.
message StreamingAnalyzeContentRequest {
  // Required. The name of the participant this text comes from.
  // Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
  string participant = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Participant"
    }
  ];

  // Required. The input config.
  oneof config {
    // Instructs the speech recognizer how to process the speech audio.
    InputAudioConfig audio_config = 2;

    // The natural language text to be processed.
    InputTextConfig text_config = 3;
  }

  // Speech synthesis configuration.
  // The speech synthesis settings for a virtual agent that may be configured
  // for the associated conversation profile are not used when calling
  // StreamingAnalyzeContent. If this configuration is not supplied, speech
  // synthesis is disabled.
  OutputAudioConfig reply_audio_config = 4;

  // Required. The input.
  oneof input {
    // The input audio content to be recognized. Must be sent if `audio_config`
    // is set in the first message. The complete audio over all streaming
    // messages must not exceed 1 minute.
    bytes input_audio = 5;

    // The UTF-8 encoded natural language text to be processed. Must be sent if
    // `text_config` is set in the first message. Text length must not exceed
    // 256 bytes. The `input_text` field can only be sent once.
    string input_text = 6;

    // The DTMF digits used to invoke intent and fill in parameter value.
    //
    // This input is ignored if the previous response indicated that DTMF input
    // is not accepted.
    TelephonyDtmfEvents input_dtmf = 9;
  }

  // Parameters for a Dialogflow virtual-agent query.
  QueryParameters query_params = 7;
}
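
// --- Non-normative sketch (not part of the API definition) ---
// The request ordering described above for the audio case, assuming the
// Python client, whose streaming_analyze_content takes an iterator of
// requests; names are illustrative.
//
//   from google.cloud import dialogflow_v2
//
//   def request_stream(participant, audio_chunks):
//       # 1. First message: participant and config only, no input.
//       yield dialogflow_v2.StreamingAnalyzeContentRequest(
//           participant=participant,
//           audio_config=dialogflow_v2.InputAudioConfig(
//               audio_encoding=dialogflow_v2.AudioEncoding.AUDIO_ENCODING_LINEAR_16,
//               sample_rate_hertz=16000,
//               language_code="en-US",
//           ),
//       )
//       # 2. Subsequent messages: audio only (at most 1 minute in total).
//       for chunk in audio_chunks:
//           yield dialogflow_v2.StreamingAnalyzeContentRequest(input_audio=chunk)
//       # 3. Returning ends the generator, which half-closes the stream.
//
//   client = dialogflow_v2.ParticipantsClient()
//   participant = ("projects/my-project/conversations/my-conversation"
//                  "/participants/my-participant")
//   # `chunks` is an iterable of bytes from your audio source (illustrative).
//   for response in client.streaming_analyze_content(
//           requests=request_stream(participant, chunks)):
//       if response.recognition_result.transcript:
//           print(response.recognition_result.transcript)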

// The top-level message returned from the `StreamingAnalyzeContent` method.
//
// Multiple response messages can be returned in order:
//
// 1. If the input was set to streaming audio, the first one or more messages
//    contain `recognition_result`. Each `recognition_result` represents a
//    more complete transcript of what the user said. The last
//    `recognition_result` has `is_final` set to `true`.
//
// 2. The next message contains `reply_text` and optionally `reply_audio`
//    returned by an agent. This message may also contain
//    `automated_agent_reply`.
message StreamingAnalyzeContentResponse {
  // The result of speech recognition.
  StreamingRecognitionResult recognition_result = 1;

  // The output text content.
  // This field is set if an automated agent responded with a text for the
  // user.
  string reply_text = 2;

  // The audio data bytes encoded as specified in the request.
  // This field is set if:
  //
  // - The `reply_audio_config` field is specified in the request.
  // - The automated agent, which this output comes from, responded with audio.
  //   In such case, the `reply_audio.config` field contains settings used to
  //   synthesize the speech.
  //
  // In some scenarios, multiple output audio fields may be present in the
  // response structure. In these cases, only the top-most-level audio output
  // has content.
  OutputAudio reply_audio = 3;

  // Only set if a Dialogflow automated agent has responded.
  // Note that: [AutomatedAgentReply.detect_intent_response.output_audio][]
  // and [AutomatedAgentReply.detect_intent_response.output_audio_config][]
  // are always empty, use
  // [reply_audio][google.cloud.dialogflow.v2.StreamingAnalyzeContentResponse.reply_audio] instead.
  AutomatedAgentReply automated_agent_reply = 4;

  // Message analyzed by CCAI.
  Message message = 6;

  // The suggestions for the most recent human agent. The order is the same as
  // [HumanAgentAssistantConfig.SuggestionConfig.feature_configs][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.SuggestionConfig.feature_configs] of
  // [HumanAgentAssistantConfig.human_agent_suggestion_config][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.human_agent_suggestion_config].
  repeated SuggestionResult human_agent_suggestion_results = 7;

  // The suggestions for the end user. The order is the same as
  // [HumanAgentAssistantConfig.SuggestionConfig.feature_configs][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.SuggestionConfig.feature_configs] of
  // [HumanAgentAssistantConfig.end_user_suggestion_config][google.cloud.dialogflow.v2.HumanAgentAssistantConfig.end_user_suggestion_config].
  repeated SuggestionResult end_user_suggestion_results = 8;

  // Indicates the parameters of DTMF.
  DtmfParameters dtmf_parameters = 10;
}

// The request message for [Participants.SuggestArticles][google.cloud.dialogflow.v2.Participants.SuggestArticles].
message SuggestArticlesRequest {
  // Required. The name of the participant to fetch suggestions for.
  // Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Participant"
    }
  ];

  // The name of the latest conversation message to compile suggestions
  // for. If empty, it will be the latest message of the conversation.
  //
  // Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/messages/<Message ID>`.
  string latest_message = 2 [(google.api.resource_reference) = {
    type: "dialogflow.googleapis.com/Message"
  }];

  // Max number of messages prior to and including
  // [latest_message][google.cloud.dialogflow.v2.SuggestArticlesRequest.latest_message] to use as context
  // when compiling the suggestion. By default 20 and at most 50.
  int32 context_size = 3;
}

// The response message for [Participants.SuggestArticles][google.cloud.dialogflow.v2.Participants.SuggestArticles].
message SuggestArticlesResponse {
  // Articles ordered by score in descending order.
  repeated ArticleAnswer article_answers = 1;

  // The name of the latest conversation message used to compile
  // suggestions for.
  //
  // Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/messages/<Message ID>`.
  string latest_message = 2;

  // Number of messages prior to and including
  // [latest_message][google.cloud.dialogflow.v2.SuggestArticlesResponse.latest_message] to compile the
  // suggestion. It may be smaller than the
  // [SuggestArticlesRequest.context_size][google.cloud.dialogflow.v2.SuggestArticlesRequest.context_size] field in the request if there
  // aren't that many messages in the conversation.
  int32 context_size = 3;
}
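
// --- Non-normative sketch (not part of the API definition) ---
// Requesting article suggestions for the latest conversation message,
// assuming the Python client; the resource name is illustrative.
//
//   from google.cloud import dialogflow_v2
//
//   client = dialogflow_v2.ParticipantsClient()
//   response = client.suggest_articles(
//       parent="projects/my-project/conversations/my-conversation"
//              "/participants/my-participant")
//   for answer in response.article_answers:
//       print(answer.title, answer.uri, answer.confidence)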

// The request message for [Participants.SuggestFaqAnswers][google.cloud.dialogflow.v2.Participants.SuggestFaqAnswers].
message SuggestFaqAnswersRequest {
  // Required. The name of the participant to fetch suggestions for.
  // Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/participants/<Participant ID>`.
  string parent = 1 [
    (google.api.field_behavior) = REQUIRED,
    (google.api.resource_reference) = {
      type: "dialogflow.googleapis.com/Participant"
    }
  ];

  // The name of the latest conversation message to compile suggestions
  // for. If empty, it will be the latest message of the conversation.
  //
  // Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/messages/<Message ID>`.
  string latest_message = 2 [(google.api.resource_reference) = {
    type: "dialogflow.googleapis.com/Message"
  }];

  // Max number of messages prior to and including
  // [latest_message] to use as context when compiling the
  // suggestion. By default 20 and at most 50.
  int32 context_size = 3;
}

// The response message for [Participants.SuggestFaqAnswers][google.cloud.dialogflow.v2.Participants.SuggestFaqAnswers].
message SuggestFaqAnswersResponse {
  // Answers extracted from FAQ documents.
  repeated FaqAnswer faq_answers = 1;

  // The name of the latest conversation message used to compile
  // suggestions for.
  //
  // Format: `projects/<Project ID>/locations/<Location ID>/conversations/<Conversation ID>/messages/<Message ID>`.
  string latest_message = 2;

  // Number of messages prior to and including
  // [latest_message][google.cloud.dialogflow.v2.SuggestFaqAnswersResponse.latest_message] to compile the
  // suggestion. It may be smaller than the
  // [SuggestFaqAnswersRequest.context_size][google.cloud.dialogflow.v2.SuggestFaqAnswersRequest.context_size] field in the request if there
  // aren't that many messages in the conversation.
  int32 context_size = 3;
}

// Represents the natural language speech audio to be processed.
message AudioInput {
  // Required. Instructs the speech recognizer how to process the speech audio.
  InputAudioConfig config = 1 [(google.api.field_behavior) = REQUIRED];

  // Required. The natural language speech audio to be processed.
  // A single request can contain up to 1 minute of speech audio data.
  // The transcribed text cannot contain more than 256 bytes.
  bytes audio = 2 [(google.api.field_behavior) = REQUIRED];
}

// Represents the natural language speech audio to be played to the end user.
message OutputAudio {
  // Instructs the speech synthesizer how to generate the speech
  // audio.
  OutputAudioConfig config = 1;

  // The natural language speech audio.
  bytes audio = 2;
}

// Represents a response from an automated agent.
message AutomatedAgentReply {
  // Response of the Dialogflow
  // [Sessions.DetectIntent][google.cloud.dialogflow.v2.Sessions.DetectIntent] call.
  DetectIntentResponse detect_intent_response = 1;
}

// Represents an article answer.
message ArticleAnswer {
  // The article title.
  string title = 1;

  // The article URI.
  string uri = 2;

  // Article snippets.
  repeated string snippets = 3;

  // Article match confidence.
  // The system's confidence score that this article is a good match for this
  // conversation, as a value from 0.0 (completely uncertain) to 1.0
  // (completely certain).
  float confidence = 4;

  // A map that contains metadata about the answer and the
  // document from which it originates.
  map<string, string> metadata = 5;

  // The name of the answer record, in the format of
  // "projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer Record ID>"
  string answer_record = 6;
}

// Represents an answer from "frequently asked questions".
message FaqAnswer {
  // The piece of text from the `source` knowledge base document.
  string answer = 1;

  // The system's confidence score that this Knowledge answer is a good match
  // for this conversational query, ranging from 0.0 (completely uncertain)
  // to 1.0 (completely certain).
  float confidence = 2;

  // The corresponding FAQ question.
  string question = 3;

  // Indicates which Knowledge Document this answer was extracted
  // from.
  // Format: `projects/<Project ID>/locations/<Location ID>/agent/knowledgeBases/<Knowledge Base ID>/documents/<Document ID>`.
  string source = 4;

  // A map that contains metadata about the answer and the
  // document from which it originates.
  map<string, string> metadata = 5;

  // The name of the answer record, in the format of
  // "projects/<Project ID>/locations/<Location ID>/answerRecords/<Answer Record ID>"
  string answer_record = 6;
}

// A single suggestion result, of one of several types, used in the response
// of [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent] and
// [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2.Participants.StreamingAnalyzeContent],
// as well as [HumanAgentAssistantEvent][google.cloud.dialogflow.v2.HumanAgentAssistantEvent].
message SuggestionResult {
  // Different type of suggestion response.
  oneof suggestion_response {
    // Error status if the request failed.
    google.rpc.Status error = 1;

    // SuggestArticlesResponse if request is for ARTICLE_SUGGESTION.
    SuggestArticlesResponse suggest_articles_response = 2;

    // SuggestFaqAnswersResponse if request is for FAQ_ANSWER.
    SuggestFaqAnswersResponse suggest_faq_answers_response = 3;
  }
}
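
// --- Non-normative sketch (not part of the API definition) ---
// Dispatching on the `suggestion_response` oneof, assuming the Python
// client; proto-plus messages expose the underlying protobuf message (and
// its WhichOneof) via `_pb`:
//
//   for result in response.human_agent_suggestion_results:
//       case = result._pb.WhichOneof("suggestion_response")
//       if case == "error":
//           print("Suggestion failed:", result.error.message)
//       elif case == "suggest_articles_response":
//           articles = result.suggest_articles_response.article_answers
//       elif case == "suggest_faq_answers_response":
//           faqs = result.suggest_faq_answers_response.faq_answers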

// Defines the language used in the input text.
message InputTextConfig {
  // Required. The language of this conversational query. See [Language
  // Support](https://cloud.google.com/dialogflow/docs/reference/language)
  // for a list of the currently supported language codes.
  string language_code = 1 [(google.api.field_behavior) = REQUIRED];
}

// Represents a part of a message possibly annotated with an entity. The part
// can be an entity or purely a part of the message between two entities or
// message start/end.
message AnnotatedMessagePart {
  // A part of a message possibly annotated with an entity.
  string text = 1;

  // The [Dialogflow system entity
  // type](https://cloud.google.com/dialogflow/docs/reference/system-entities)
  // of this message part. If this is empty, Dialogflow could not annotate the
  // phrase part with a system entity.
  string entity_type = 2;

  // The [Dialogflow system entity formatted value
  // ](https://cloud.google.com/dialogflow/docs/reference/system-entities) of
  // this message part. For example for a system entity of type
  // `@sys.unit-currency`, this may contain:
  // {
  //   "amount": 5,
  //   "currency": "USD"
  // }
  google.protobuf.Value formatted_value = 3;
}

// Represents the result of annotation for the message.
message MessageAnnotation {
  // The collection of annotated message parts ordered by their
  // position in the message. You can recover the annotated message by
  // concatenating [AnnotatedMessagePart.text].
  repeated AnnotatedMessagePart parts = 1;

  // Indicates whether the text message contains entities.
  bool contain_entities = 2;
}
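
// --- Non-normative sketch (not part of the API definition) ---
// Recovering the full message text from a MessageAnnotation by
// concatenating the parts, as described above; assuming the Python client
// and a `message` of type Message:
//
//   text = "".join(part.text for part in message.message_annotation.parts)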