
Commit 16ebab3

feat: allow specifying custom webhook headers through query parameters (#724)

* chore: fix syntax error in the GAPIC metadata JSON file

  Use gapic-generator-typescript v1.2.8.

  Committer: @alexander-fenster
  PiperOrigin-RevId: 345712055
  Source-Author: Google APIs <[email protected]>
  Source-Date: Fri Dec 4 10:54:47 2020 -0800
  Source-Repo: googleapis/googleapis
  Source-Sha: 16dd59787d6ce130ab66066c02eeea9dac0c8f0e
  Source-Link: googleapis/googleapis@16dd597

* feat: allow specifying custom webhook headers through query parameters

  docs: recommend always using an agent version for production traffic when
  calling DetectIntent; mark match_mode in the Agent message as deprecated

  PiperOrigin-RevId: 345742559
  Source-Author: Google APIs <[email protected]>
  Source-Date: Fri Dec 4 13:15:39 2020 -0800
  Source-Repo: googleapis/googleapis
  Source-Sha: 519e9dcdff23cc14d48f85e9e00b6214ec57967e
  Source-Link: googleapis/googleapis@519e9dc
1 parent ce6eb87 · commit 16ebab3

9 files changed (+1243, -565 lines)


packages/google-cloud-dialogflow/protos/google/cloud/dialogflow/v2/agent.proto

Lines changed: 4 additions & 1 deletion
@@ -267,7 +267,10 @@ message Agent {
   bool enable_logging = 8 [(google.api.field_behavior) = OPTIONAL];

   // Optional. Determines how intents are detected from user queries.
-  MatchMode match_mode = 9 [(google.api.field_behavior) = OPTIONAL];
+  MatchMode match_mode = 9 [
+    deprecated = true,
+    (google.api.field_behavior) = OPTIONAL
+  ];

   // Optional. To filter out false positive results and still get variety in
   // matched natural language inputs for your agent, you can tune the machine

packages/google-cloud-dialogflow/protos/google/cloud/dialogflow/v2/session.proto

Lines changed: 82 additions & 41 deletions
@@ -57,6 +57,10 @@ service Sessions {
   // as a result. This method is not idempotent, because it may cause contexts
   // and session entity types to be updated, which in turn might affect
   // results of future queries.
+  //
+  // Note: Always use agent versions for production traffic.
+  // See [Versions and
+  // environments](https://cloud.google.com/dialogflow/es/docs/agents-versions).
   rpc DetectIntent(DetectIntentRequest) returns (DetectIntentResponse) {
     option (google.api.http) = {
       post: "/v2/{session=projects/*/agent/sessions/*}:detectIntent"
@@ -72,8 +76,12 @@ service Sessions {
   // Processes a natural language query in audio format in a streaming fashion
   // and returns structured, actionable data as a result. This method is only
   // available via the gRPC API (not REST).
-  rpc StreamingDetectIntent(stream StreamingDetectIntentRequest) returns (stream StreamingDetectIntentResponse) {
-  }
+  //
+  // Note: Always use agent versions for production traffic.
+  // See [Versions and
+  // environments](https://cloud.google.com/dialogflow/es/docs/agents-versions).
+  rpc StreamingDetectIntent(stream StreamingDetectIntentRequest)
+      returns (stream StreamingDetectIntentResponse) {}
 }

 // The request to detect user's intent.
@@ -90,6 +98,10 @@ message DetectIntentRequest {
   //
   // For more information, see the [API interactions
   // guide](https://cloud.google.com/dialogflow/docs/api-overview).
+  //
+  // Note: Always use agent versions for production traffic.
+  // See [Versions and
+  // environments](https://cloud.google.com/dialogflow/es/docs/agents-versions).
   string session = 1 [
     (google.api.field_behavior) = REQUIRED,
     (google.api.resource_reference) = {
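For illustration only (not part of this commit): a minimal sketch of how a Node.js caller of this library might follow the new guidance by targeting an environment-scoped session, so production traffic hits a published agent version rather than the draft agent. The project, environment, and session IDs below are placeholders.

import {SessionsClient} from '@google-cloud/dialogflow';

async function detectWithEnvironment(): Promise<void> {
  const client = new SessionsClient();

  // Environment-scoped session format documented on the `session` field:
  // projects/<Project ID>/agent/environments/<Environment ID>/users/<User ID>/sessions/<Session ID>
  // All IDs here are placeholders.
  const session =
    'projects/my-project/agent/environments/production/users/-/sessions/session-123';

  const [response] = await client.detectIntent({
    session,
    queryInput: {text: {text: 'hi', languageCode: 'en-US'}},
  });
  console.log(response.queryResult?.fulfillmentText);
}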
@@ -115,12 +127,14 @@ message DetectIntentRequest {
   // configured, no output audio is generated.
   OutputAudioConfig output_audio_config = 4;

-  // Mask for [output_audio_config][google.cloud.dialogflow.v2.DetectIntentRequest.output_audio_config] indicating which settings in this
-  // request-level config should override speech synthesizer settings defined at
-  // agent-level.
+  // Mask for
+  // [output_audio_config][google.cloud.dialogflow.v2.DetectIntentRequest.output_audio_config]
+  // indicating which settings in this request-level config should override
+  // speech synthesizer settings defined at agent-level.
   //
-  // If unspecified or empty, [output_audio_config][google.cloud.dialogflow.v2.DetectIntentRequest.output_audio_config] replaces the agent-level
-  // config in its entirety.
+  // If unspecified or empty,
+  // [output_audio_config][google.cloud.dialogflow.v2.DetectIntentRequest.output_audio_config]
+  // replaces the agent-level config in its entirety.
   google.protobuf.FieldMask output_audio_config_mask = 7;

   // The natural language speech audio to be processed. This field
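To make the mask semantics concrete, a hedged sketch of a request-level override: only the masked path overrides the agent-level synthesizer settings. The session path, field values, and mask path are illustrative, not taken from this commit.

// Request-level synthesizer settings plus a mask limiting what they override.
const audioOverrideRequest = {
  session: 'projects/my-project/agent/sessions/session-123', // placeholder
  queryInput: {text: {text: 'what are your hours?', languageCode: 'en-US'}},
  outputAudioConfig: {
    audioEncoding: 'OUTPUT_AUDIO_ENCODING_MP3',
    synthesizeSpeechConfig: {speakingRate: 1.2},
  },
  // Only speaking_rate overrides the agent-level config; every other
  // agent-level output audio setting stays in effect.
  outputAudioConfigMask: {paths: ['synthesize_speech_config.speaking_rate']},
};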
@@ -192,6 +206,17 @@ message QueryParameters {
   // Configures the type of sentiment analysis to perform. If not
   // provided, sentiment analysis is not performed.
   SentimentAnalysisRequestConfig sentiment_analysis_request_config = 10;
+
+  // This field can be used to pass HTTP headers for a webhook
+  // call. These headers will be sent to webhook along with the headers that
+  // have been configured through the Dialogflow web console. The headers
+  // defined within this field will overwrite the headers configured through the
+  // Dialogflow console if there is a conflict. Header names are
+  // case-insensitive. Google's specified headers are not allowed. Including:
+  // "Host", "Content-Length", "Connection", "From", "User-Agent",
+  // "Accept-Encoding", "If-Modified-Since", "If-None-Match", "X-Forwarded-For",
+  // etc.
+  map<string, string> webhook_headers = 14;
 }

 // Represents the query input. It can contain either:
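A minimal sketch of using the new field from the Node.js client: per-request webhook headers passed through query parameters. The header names, token, and session path are placeholders; per the field comment above, Google-reserved headers such as "Host" or "Content-Length" are not allowed.

import {SessionsClient} from '@google-cloud/dialogflow';

async function detectWithWebhookHeaders(session: string): Promise<void> {
  const client = new SessionsClient();
  const [response] = await client.detectIntent({
    session,
    queryInput: {text: {text: 'order a pizza', languageCode: 'en-US'}},
    queryParams: {
      // Sent to the webhook alongside headers configured in the Dialogflow
      // console; on conflict these values win. Names are case-insensitive.
      webhookHeaders: {
        Authorization: 'Bearer <token>',      // placeholder credential
        'X-Custom-Trace-Id': 'trace-abc-123', // placeholder header
      },
    },
  });
  console.log(response.queryResult?.webhookPayload);
}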
@@ -325,25 +350,29 @@ message QueryResult {
 }

 // The top-level message sent by the client to the
-// [Sessions.StreamingDetectIntent][google.cloud.dialogflow.v2.Sessions.StreamingDetectIntent] method.
+// [Sessions.StreamingDetectIntent][google.cloud.dialogflow.v2.Sessions.StreamingDetectIntent]
+// method.
 //
 // Multiple request messages should be sent in order:
 //
 // 1. The first message must contain
 // [session][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.session],
-// [query_input][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.query_input] plus optionally
-// [query_params][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.query_params]. If the client
-// wants to receive an audio response, it should also contain
+// [query_input][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.query_input]
+// plus optionally
+// [query_params][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.query_params].
+// If the client wants to receive an audio response, it should also contain
 // [output_audio_config][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.output_audio_config].
 // The message must not contain
 // [input_audio][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.input_audio].
-// 2. If [query_input][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.query_input] was set to
-// [query_input.audio_config][google.cloud.dialogflow.v2.InputAudioConfig], all subsequent
-// messages must contain
-// [input_audio][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.input_audio] to continue with
-// Speech recognition.
-// If you decide to rather detect an intent from text input after you
-// already started Speech recognition, please send a message with
+// 2. If
+// [query_input][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.query_input]
+// was set to
+// [query_input.audio_config][google.cloud.dialogflow.v2.InputAudioConfig],
+// all subsequent messages must contain
+// [input_audio][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.input_audio]
+// to continue with Speech recognition. If you decide to rather detect an
+// intent from text input after you already started Speech recognition,
+// please send a message with
 // [query_input.text][google.cloud.dialogflow.v2.QueryInput.text].
 //
 // However, note that:
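A sketch of the request ordering described above, using this library's streaming client. The session, file path, and audio settings are placeholders, and the whole audio file is written as a single chunk for brevity.

import * as fs from 'fs';
import {SessionsClient} from '@google-cloud/dialogflow';

function streamAudio(session: string, audioPath: string): void {
  const client = new SessionsClient();
  const detectStream = client
    .streamingDetectIntent()
    .on('error', console.error)
    .on('data', data => {
      if (data.recognitionResult) {
        console.log('Interim transcript:', data.recognitionResult.transcript);
      } else if (data.queryResult) {
        console.log('Matched intent:', data.queryResult.intent?.displayName);
      }
    });

  // 1. First message: session, optional query_params, and query_input with
  //    audio_config only; no input_audio yet.
  detectStream.write({
    session,
    queryInput: {
      audioConfig: {
        audioEncoding: 'AUDIO_ENCODING_LINEAR_16',
        sampleRateHertz: 16000,
        languageCode: 'en-US',
      },
    },
  });

  // 2. Subsequent messages carry only input_audio (a single chunk here).
  detectStream.write({inputAudio: fs.readFileSync(audioPath)});

  // Half-close: no more audio; results keep arriving until the server closes.
  detectStream.end();
}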
@@ -368,6 +397,10 @@ message StreamingDetectIntentRequest {
   //
   // For more information, see the [API interactions
   // guide](https://cloud.google.com/dialogflow/docs/api-overview).
+  //
+  // Note: Always use agent versions for production traffic.
+  // See [Versions and
+  // environments](https://cloud.google.com/dialogflow/es/docs/agents-versions).
   string session = 1 [
     (google.api.field_behavior) = REQUIRED,
     (google.api.resource_reference) = {
@@ -388,27 +421,30 @@ message StreamingDetectIntentRequest {
   // 3. an event that specifies which intent to trigger.
   QueryInput query_input = 3 [(google.api.field_behavior) = REQUIRED];

-  // Please use [InputAudioConfig.single_utterance][google.cloud.dialogflow.v2.InputAudioConfig.single_utterance] instead.
-  // If `false` (default), recognition does not cease until
-  // the client closes the stream. If `true`, the recognizer will detect a
-  // single spoken utterance in input audio. Recognition ceases when it detects
-  // the audio's voice has stopped or paused. In this case, once a detected
-  // intent is received, the client should close the stream and start a new
-  // request with a new stream as needed.
-  // This setting is ignored when `query_input` is a piece of text or an event.
+  // Please use
+  // [InputAudioConfig.single_utterance][google.cloud.dialogflow.v2.InputAudioConfig.single_utterance]
+  // instead. If `false` (default), recognition does not cease until the client
+  // closes the stream. If `true`, the recognizer will detect a single spoken
+  // utterance in input audio. Recognition ceases when it detects the audio's
+  // voice has stopped or paused. In this case, once a detected intent is
+  // received, the client should close the stream and start a new request with a
+  // new stream as needed. This setting is ignored when `query_input` is a piece
+  // of text or an event.
   bool single_utterance = 4 [deprecated = true];

   // Instructs the speech synthesizer how to generate the output
   // audio. If this field is not set and agent-level speech synthesizer is not
   // configured, no output audio is generated.
   OutputAudioConfig output_audio_config = 5;

-  // Mask for [output_audio_config][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.output_audio_config] indicating which settings in this
-  // request-level config should override speech synthesizer settings defined at
-  // agent-level.
+  // Mask for
+  // [output_audio_config][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.output_audio_config]
+  // indicating which settings in this request-level config should override
+  // speech synthesizer settings defined at agent-level.
   //
-  // If unspecified or empty, [output_audio_config][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.output_audio_config] replaces the agent-level
-  // config in its entirety.
+  // If unspecified or empty,
+  // [output_audio_config][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.output_audio_config]
+  // replaces the agent-level config in its entirety.
   google.protobuf.FieldMask output_audio_config_mask = 7;

   // The input audio content to be recognized. Must be sent if
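Since the request-level flag is deprecated, a short sketch of where single-utterance mode now lives: on InputAudioConfig inside the first streaming request. All values below are placeholders.

// First StreamingDetectIntent request with single-utterance mode set on the
// input audio config rather than the deprecated request-level field.
const firstStreamingRequest = {
  session: 'projects/my-project/agent/sessions/session-123', // placeholder
  queryInput: {
    audioConfig: {
      audioEncoding: 'AUDIO_ENCODING_LINEAR_16',
      sampleRateHertz: 16000,
      languageCode: 'en-US',
      singleUtterance: true, // replaces StreamingDetectIntentRequest.single_utterance
    },
  },
};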
@@ -503,11 +539,12 @@ message StreamingRecognitionResult {
 
     // Event indicates that the server has detected the end of the user's speech
     // utterance and expects no additional inputs.
-    // Therefore, the server will not process additional audio (although it may subsequently return additional results). The
-    // client should stop sending additional audio data, half-close the gRPC
-    // connection, and wait for any additional results until the server closes
-    // the gRPC connection. This message is only sent if `single_utterance` was
-    // set to `true`, and is not used otherwise.
+    // Therefore, the server will not process additional audio (although it may
+    // subsequently return additional results). The client should stop sending
+    // additional audio data, half-close the gRPC connection, and wait for any
+    // additional results until the server closes the gRPC connection. This
+    // message is only sent if `single_utterance` was set to `true`, and is not
+    // used otherwise.
     END_OF_SINGLE_UTTERANCE = 2;
   }
 
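A hedged sketch of the client behavior described for END_OF_SINGLE_UTTERANCE: stop sending audio, half-close, then keep reading until the server closes the stream. Whether the enum surfaces as a string name or its numeric value depends on how the response is decoded, so both forms are checked here as an assumption.

import {SessionsClient} from '@google-cloud/dialogflow';

const client = new SessionsClient();
const detectStream = client.streamingDetectIntent();

detectStream.on('data', data => {
  const msgType = data.recognitionResult?.messageType;
  // END_OF_SINGLE_UTTERANCE = 2 in the enum above; string or number depending
  // on decode settings (assumption).
  if (msgType === 'END_OF_SINGLE_UTTERANCE' || msgType === 2) {
    detectStream.end(); // stop sending audio; await remaining results
  }
  if (data.queryResult) {
    console.log('Final intent:', data.queryResult.intent?.displayName);
  }
});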
@@ -534,7 +571,8 @@ message StreamingRecognitionResult {
   float confidence = 4;

   // Word-specific information for the words recognized by Speech in
-  // [transcript][google.cloud.dialogflow.v2.StreamingRecognitionResult.transcript]. Populated if and only if `message_type` = `TRANSCRIPT` and
+  // [transcript][google.cloud.dialogflow.v2.StreamingRecognitionResult.transcript].
+  // Populated if and only if `message_type` = `TRANSCRIPT` and
   // [InputAudioConfig.enable_word_info] is set.
   repeated SpeechWordInfo speech_word_info = 7;
 
@@ -601,11 +639,14 @@ message SentimentAnalysisRequestConfig {
 // and identifies the prevailing subjective opinion, especially to determine a
 // user's attitude as positive, negative, or neutral.
 // For [Participants.DetectIntent][], it needs to be configured in
-// [DetectIntentRequest.query_params][google.cloud.dialogflow.v2.DetectIntentRequest.query_params]. For
-// [Participants.StreamingDetectIntent][], it needs to be configured in
+// [DetectIntentRequest.query_params][google.cloud.dialogflow.v2.DetectIntentRequest.query_params].
+// For [Participants.StreamingDetectIntent][], it needs to be configured in
 // [StreamingDetectIntentRequest.query_params][google.cloud.dialogflow.v2.StreamingDetectIntentRequest.query_params].
-// And for [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent] and
-// [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2.Participants.StreamingAnalyzeContent], it needs to be configured in
+// And for
+// [Participants.AnalyzeContent][google.cloud.dialogflow.v2.Participants.AnalyzeContent]
+// and
+// [Participants.StreamingAnalyzeContent][google.cloud.dialogflow.v2.Participants.StreamingAnalyzeContent],
+// it needs to be configured in
 // [ConversationProfile.human_agent_assistant_config][google.cloud.dialogflow.v2.ConversationProfile.human_agent_assistant_config]
 message SentimentAnalysisResult {
   // The sentiment analysis result for `query_text`.
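For context, a small sketch of the configuration path the comment above refers to for DetectIntent: enabling sentiment analysis through query_params. The session and query text are placeholders.

const sentimentRequest = {
  session: 'projects/my-project/agent/sessions/session-123', // placeholder
  queryInput: {text: {text: 'this took way too long', languageCode: 'en-US'}},
  queryParams: {
    // Ask Dialogflow to analyze sentiment of the query text.
    sentimentAnalysisRequestConfig: {analyzeQueryTextSentiment: true},
  },
};
// The response then carries queryResult.sentimentAnalysisResult
// .queryTextSentiment with score and magnitude.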

packages/google-cloud-dialogflow/protos/protos.d.ts

Lines changed: 6 additions & 0 deletions
Some generated files are not rendered by default.
