17
17
18
18
from google .cloud .aiplatform_v1 .types import encryption_spec as gca_encryption_spec
19
19
from google .cloud .aiplatform_v1 .types import explanation
20
+ from google .cloud .aiplatform_v1 .types import io
20
21
from google .cloud .aiplatform_v1 .types import machine_resources
21
22
from google .protobuf import timestamp_pb2 # type: ignore
22
23
23
24
24
25
# Register this module's message types with the proto-plus runtime.
# The manifest lists every top-level message defined in this file.
__protobuf__ = proto.module(
    package="google.cloud.aiplatform.v1",
    manifest={
        "Endpoint",
        "DeployedModel",
        "PrivateEndpoints",
        "PredictRequestResponseLoggingConfig",
    },
)
28
34
29
35
@@ -113,6 +119,9 @@ class Endpoint(proto.Message):
113
119
associated with this Endpoint if monitoring is enabled by
114
120
[CreateModelDeploymentMonitoringJob][]. Format:
115
121
``projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}``
122
+ predict_request_response_logging_config (google.cloud.aiplatform_v1.types.PredictRequestResponseLoggingConfig):
123
+ Configures the request-response logging for
124
+ online prediction.
116
125
"""
117
126
118
127
name = proto .Field (proto .STRING , number = 1 ,)
@@ -132,6 +141,9 @@ class Endpoint(proto.Message):
132
141
network = proto .Field (proto .STRING , number = 13 ,)
133
142
enable_private_service_connect = proto .Field (proto .BOOL , number = 17 ,)
134
143
model_deployment_monitoring_job = proto .Field (proto .STRING , number = 14 ,)
144
+ predict_request_response_logging_config = proto .Field (
145
+ proto .MESSAGE , number = 18 , message = "PredictRequestResponseLoggingConfig" ,
146
+ )
135
147
136
148
137
149
class DeployedModel (proto .Message ):
@@ -286,4 +298,31 @@ class PrivateEndpoints(proto.Message):
286
298
service_attachment = proto .Field (proto .STRING , number = 4 ,)
287
299
288
300
301
class PredictRequestResponseLoggingConfig(proto.Message):
    r"""Configuration for logging request-response to a BigQuery
    table.

    Attributes:
        enabled (bool):
            If logging is enabled or not.
        sampling_rate (float):
            Percentage of requests to be logged, expressed as a fraction
            in range(0,1].
        bigquery_destination (google.cloud.aiplatform_v1.types.BigQueryDestination):
            BigQuery table for logging. If only given project, a new
            dataset will be created with name
            ``logging_<endpoint-display-name>_<endpoint-id>`` where will
            be made BigQuery-dataset-name compatible (e.g. most special
            characters will become underscores). If no table name is
            given, a new table will be created with name
            ``request_response_logging``
    """

    # Master on/off switch for request-response logging.
    enabled = proto.Field(proto.BOOL, number=1,)
    # Fraction of prediction requests to sample; see docstring for range.
    sampling_rate = proto.Field(proto.DOUBLE, number=2,)
    # Destination table; message type comes from the sibling `io` types module.
    bigquery_destination = proto.Field(
        proto.MESSAGE, number=3, message=io.BigQueryDestination,
    )
289
328
# Export exactly the message names declared in the proto-plus manifest,
# sorted for a stable, deterministic public API listing.
__all__ = tuple(sorted(__protobuf__.manifest))
0 commit comments